code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import yaml
class ConfigChanger(object):
''' class to read/write the config file
'''
def __init__(self, location):
self.loc = location # path to yaml file
def config_file_ok(self):
''' returns boolean if config file is OK and contains good values,
or false if config needs to be edited
'''
# try to load the config
try:
f = open(self.loc, 'r')
except:
return False
# check for default values
try:
config = yaml.load(f)
if config['github']['accesstoken'] == 'secret_access_token':
return False
if config['github']['org_name'] == 'org_name':
return False
if config['github']['username'] == 'githubhandle':
return False
except:
return False
return True
def write_config(self, config):
''' write the yaml file '''
f = open(self.loc, 'w')
return yaml.dump(config, f)
def load_config(self):
''' load the yaml file; return it '''
f = open(self.loc, 'r')
return yaml.load(f)
def get_empty_config(self):
return {'github': {'accesstoken': 'secret_access_token',
'org_name': 'org_name',
'username': 'githubhandle'},
'log': {'dateformat': '%Y-%m-%d %H:%M:%S',
'file': 'ghcc.log',
'format': '[%(asctime)s] [%(levelname)s] - %(message)s'}
}
| [
"yaml.load",
"yaml.dump"
] | [((1015, 1035), 'yaml.dump', 'yaml.dump', (['config', 'f'], {}), '(config, f)\n', (1024, 1035), False, 'import yaml\n'), ((1157, 1169), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (1166, 1169), False, 'import yaml\n'), ((539, 551), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (548, 551), False, 'import yaml\n')] |
from QuoteEngine import Ingestor, QuoteModel
from MemeGenerator import MemeEngine
from PIL import Image
import argparse
import random
import os
import textwrap
import logging
# Module-level logging: the logger itself accepts DEBUG and above;
# INFO+ records are persisted to utils.log, while every record is also
# echoed to the console, both using the same timestamp:level format.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
file_handler = logging.FileHandler('utils.log')
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
def open_image(category):
    """
    Return the path of a random image from a user-specified category.

    Parameters
    ----------
    category : str
        image category (dog or book, default=dog)

    Returns
    -------
    str
        Path of a randomly chosen image file.
    """
    images = "./_data/photos/book/"
    if category == 'dog':
        images = "./_data/photos/dog/"
    imgs = []
    # Accumulate files from every directory level. The previous version
    # re-assigned `imgs` on each os.walk iteration, so only the files of
    # the last directory visited could ever be chosen.
    for root, dirs, files in os.walk(images):
        imgs.extend(os.path.join(root, name) for name in files)
    return random.choice(imgs)
def open_image_app():
    """
    Return all dog image paths available for building the meme.

    Returns
    -------
    list of str
        Paths of every image found under ./_data/photos/dog/.
    """
    images = "./_data/photos/dog/"
    imgs = []
    # Accumulate files across all subdirectories; the previous version
    # overwrote `imgs` per os.walk iteration and returned only the files
    # of the last directory.
    for root, dirs, files in os.walk(images):
        imgs.extend(os.path.join(root, name) for name in files)
    return imgs
def open_quote(category):
    """
    Return one random quote from the quote files of a category.

    Parameters
    ----------
    category : str
        quote category (dog or book, default=dog)
    """
    if category == 'dog':
        quote_files = ['./_data/DogQuotes/DogQuotesTXT.txt',
                       './_data/DogQuotes/DogQuotesDOCX.docx',
                       './_data/DogQuotes/DogQuotesPDF.pdf',
                       './_data/DogQuotes/DogQuotesCSV.csv']
    else:
        quote_files = ['./_data/BookQuotes/BookQuotesDOCX.docx']
    # Flatten the parsed quotes of every source file into one pool.
    quotes = [quote for path in quote_files for quote in Ingestor.parse(path)]
    return random.choice(quotes)
def open_quote_app():
    """
    Return every dog quote available for building the meme.
    """
    sources = ['./_data/DogQuotes/DogQuotesTXT.txt',
               './_data/DogQuotes/DogQuotesDOCX.docx',
               './_data/DogQuotes/DogQuotesPDF.pdf',
               './_data/DogQuotes/DogQuotesCSV.csv']
    # Flatten the parsed quotes of every source file into one list.
    return [quote for src in sources for quote in Ingestor.parse(src)]
def image_resize(img_path, width=500):
    """
    Resize an image, preserving aspect ratio, to be used by make_meme().

    Paramters
    ---------
    img_path : str
        image file path
    width : int
        target width of image in pixels (default = 500, maximum = 500)

    Returns
    -------
    PIL.Image.Image
        The resized image.
    """
    MAX_WIDTH: int = 500
    assert width is not None, 'Width is None'
    # Cap the output width at MAX_WIDTH. The previous assertion read
    # `width >= MAX_WIDTH`, which contradicted both the constant's name
    # and its own error message ('Width > 500').
    assert width <= MAX_WIDTH, 'Width > 500'
    with Image.open(img_path) as img:
        ratio = width / float(img.size[0])
        height = int(ratio * img.size[1])
        img = img.resize((width, height))
    return img
def text_draw(draw, text, author, fill, font, width, height):
"""
Draw text in random location on image.
Paramters
---------
draw : image object
image
text : str
quote text
author : str
quote text
fill : tuple
text fill
font : font object
text font
width : int
image width
height : int
image height
"""
x_max = int(0.6*width)
y_max = int(0.8*height)
x = random.randint(15, x_max)
y = random.randint(20, y_max)
wrap_limit = (width - x)*0.08
text = textwrap.fill(text, wrap_limit)
if len(text+author) > (height-y)*0.5:
draw.text((20, 20), text=text+'\n'+'-'+author, fill=fill, font=font)
else:
draw.text((x, y), text=text+'\n'+'-'+author, fill=fill, font=font)
return draw
| [
"logging.getLogger",
"logging.StreamHandler",
"random.choice",
"PIL.Image.open",
"logging.Formatter",
"os.path.join",
"textwrap.fill",
"logging.FileHandler",
"random.randint",
"os.walk",
"QuoteEngine.Ingestor.parse"
] | [((185, 212), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (202, 212), False, 'import logging\n'), ((257, 315), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s:%(levelname)s:%(message)s"""'], {}), "('%(asctime)s:%(levelname)s:%(message)s')\n", (274, 315), False, 'import logging\n'), ((332, 364), 'logging.FileHandler', 'logging.FileHandler', (['"""utils.log"""'], {}), "('utils.log')\n", (351, 364), False, 'import logging\n'), ((456, 479), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (477, 479), False, 'import logging\n'), ((925, 940), 'os.walk', 'os.walk', (['images'], {}), '(images)\n', (932, 940), False, 'import os\n'), ((1295, 1310), 'os.walk', 'os.walk', (['images'], {}), '(images)\n', (1302, 1310), False, 'import os\n'), ((3579, 3604), 'random.randint', 'random.randint', (['(15)', 'x_max'], {}), '(15, x_max)\n', (3593, 3604), False, 'import random\n'), ((3613, 3638), 'random.randint', 'random.randint', (['(20)', 'y_max'], {}), '(20, y_max)\n', (3627, 3638), False, 'import random\n'), ((3684, 3715), 'textwrap.fill', 'textwrap.fill', (['text', 'wrap_limit'], {}), '(text, wrap_limit)\n', (3697, 3715), False, 'import textwrap\n'), ((1017, 1036), 'random.choice', 'random.choice', (['imgs'], {}), '(imgs)\n', (1030, 1036), False, 'import random\n'), ((2016, 2037), 'random.choice', 'random.choice', (['quotes'], {}), '(quotes)\n', (2029, 2037), False, 'import random\n'), ((2935, 2955), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (2945, 2955), False, 'from PIL import Image\n'), ((958, 982), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (970, 982), False, 'import os\n'), ((1328, 1352), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (1340, 1352), False, 'import os\n'), ((1982, 1999), 'QuoteEngine.Ingestor.parse', 'Ingestor.parse', (['f'], {}), '(f)\n', (1996, 1999), False, 'from QuoteEngine import Ingestor, QuoteModel\n'), 
((2511, 2528), 'QuoteEngine.Ingestor.parse', 'Ingestor.parse', (['f'], {}), '(f)\n', (2525, 2528), False, 'from QuoteEngine import Ingestor, QuoteModel\n')] |
# -*- coding: utf-8 -*-
import requests
def get_all_commits(base_url, token, project_id, filter_author=''):
    """
    Fetch every commit on master for a project, following pagination.

    Parameters
    ----------
    base_url : str
        Base URL of the GitLab instance.
    token : str
        Private token used for authentication.
    project_id : int or str
        Project identifier.
    filter_author : str
        When non-empty, keep only commits whose author_name matches.

    Returns
    -------
    list
        Commit dicts as returned by the GitLab API.
    """
    res = []
    next_page = 1
    url_format = '{}/api/v4/projects/{}/repository/commits?ref=master&per_page=100&page={}'
    # GitLab signals the last page with an empty 'X-Next-Page' header,
    # while a missing header makes .get() return None. The previous
    # `next_page != ''` test therefore kept looping (with page=None)
    # whenever the header was absent; a truthiness check covers both.
    while next_page:
        url = url_format.format(base_url, project_id, next_page)
        resp = requests.get(url, headers={'Private-Token': token})
        next_page = resp.headers.get('X-Next-Page')
        if filter_author == '':
            res.extend(resp.json())
        else:
            for commit in resp.json():
                if commit['author_name'] == filter_author:
                    res.append(commit)
    return res
def get_commit_detail(base_url, token, project_id, commit_id):
    """Return the JSON detail of a single commit from the GitLab API."""
    endpoint = '{}/api/v4/projects/{}/repository/commits/{}'.format(
        base_url, project_id, commit_id)
    headers = {'Private-Token': token}
    return requests.get(endpoint, headers=headers).json()
| [
"requests.get"
] | [((850, 901), 'requests.get', 'requests.get', (['url'], {'headers': "{'Private-Token': token}"}), "(url, headers={'Private-Token': token})\n", (862, 901), False, 'import requests\n'), ((340, 391), 'requests.get', 'requests.get', (['url'], {'headers': "{'Private-Token': token}"}), "(url, headers={'Private-Token': token})\n", (352, 391), False, 'import requests\n')] |
import pandas as pd
# Load the scraped EIA records (JSON-lines file) into a DataFrame.
# NOTE(review): the backslashes survive only because '\e' is not a
# recognised escape sequence; a raw string r"D:\eiaScrapper\eio.jl"
# would be safer.
df=pd.read_json("D:\eiaScrapper\eio.jl")
# Print a concise summary (columns, dtypes, memory usage).
print(df.info())
"pandas.read_json"
] | [((25, 64), 'pandas.read_json', 'pd.read_json', (['"""D:\\\\eiaScrapper\\\\eio.jl"""'], {}), "('D:\\\\eiaScrapper\\\\eio.jl')\n", (37, 64), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 21 20:00:50 2020
@author: takada
"""
import logging
import numpy as np
import functools
import operator
from typing import List, Dict, Callable
import time
import nidaqmx
from nidaqmx.stream_writers import (
DigitalSingleChannelWriter, AnalogMultiChannelWriter)
from qcodes import Instrument, VisaInstrument, validators as vals
from qcodes.instrument.channel import InstrumentChannel
from qcodes.instrument.parameter import ArrayParameter, Parameter
from qcodes.dataset.sqlite.database import connect
from qcodes.dataset.sqlite.queries import get_last_run
from qcodes.dataset.data_set import load_by_id
log = logging.getLogger(__name__)
class NI6733_ao_voltage_trace(ArrayParameter):

    def __init__(self, name: str, instrument: InstrumentChannel,
                 channum: int) -> None:
        """
        Array parameter holding an analog-output voltage trace, attached
        to one channel of the analog output.

        Parameters
        ----------
        name : str
            Name of the trace.
        instrument : InstrumentChannel
            Channel instrument that owns this trace.
        channum : int
            Number of the channel this trace belongs to.

        Returns
        -------
        None
        """
        super().__init__(name=name,
                         shape=(1,),
                         label='voltage',
                         unit='V',
                         setpoint_names=('Count',),
                         setpoint_labels=('Count',),
                         setpoint_units=('pts',),
                         setpoints=None,
                         docstring='Holds analog output trace')
        # Keep the owning channel and its index; assigned after the base
        # __init__ so they are not clobbered by the parent constructor.
        self.channum = channum
        self._instrument = instrument

    def get_raw(self):
        # Acquisition is not implemented for this parameter.
        pass
class NI6733_ao_voltage_channel(InstrumentChannel):

    def __init__(self, parent: Instrument, name: str,
                 slot_num: int, channum: int, min_val: float = -10.0,
                 fast_sequence: bool = False, fast_sequence_delta: float = -0.1,
                 max_val: float = 10.0) -> None:
        """
        One analog-output channel of the NI6733 card.

        Parameters
        ----------
        parent : Instrument
            Host instrument handler.
        name : str
            Given name of the channel.
        slot_num : int
            Slot number hosting the channel.
        channum : int
            Channel number within the slot.
        min_val : float, optional
            Lower bound of the channel voltage. The default is -10.0.
        max_val : float, optional
            Upper bound of the channel voltage. The default is 10.0.
        fast_sequence : bool, optional
            Whether this DAC takes part in the fast sequence.
        fast_sequence_delta : float
            Excursion of the fast sequence away from the current voltage.

        Returns
        -------
        None
        """
        super().__init__(parent, name)
        self.instrument = parent
        self.slot_num = slot_num
        self.channum = channum
        self._min_val = min_val
        self._max_val = max_val
        self._current_val = 0.0
        self._target_val = None
        self._fast_sequence = fast_sequence
        self._fast_sequence_delta = fast_sequence_delta
        self.add_parameter('min_val',
                           label='Minimum value',
                           unit='V',
                           get_cmd=self.get_min_val,
                           set_cmd=self.set_min_val,
                           vals=vals.Numbers(-10.0, 10.0))
        self.add_parameter('max_val',
                           label='Maximum value',
                           unit='V',
                           get_cmd=self.get_max_val,
                           set_cmd=self.set_max_val,
                           vals=vals.Numbers(-10.0, 10.0))
        self.add_parameter('cv',
                           label='Current value',
                           unit='V',
                           get_cmd=self.get_current_val,
                           set_cmd=self.set_current_val,
                           vals=vals.Numbers(-5.0, 5.0))
        self.add_parameter('fs',
                           label='fast sequence',
                           get_cmd=self.get_fast_sequence,
                           set_cmd=self.set_fast_sequence)
        self.add_parameter('fs_delta',
                           label='fast sequence delta',
                           unit='V',
                           get_cmd=self.get_fast_sequence_delta,
                           set_cmd=self.set_fast_sequence_delta,
                           vals=vals.Numbers(-1.0, 1.0))

    def get_min_val(self):
        """Return the lower voltage bound."""
        return self._min_val

    def set_min_val(self, val: float):
        """Set the lower voltage bound."""
        self._min_val = val

    def get_max_val(self):
        """Return the upper voltage bound."""
        return self._max_val

    def set_max_val(self, val: float):
        """Set the upper voltage bound."""
        self._max_val = val

    def get_current_val(self):
        """Return the last value actually written to the channel."""
        return self._current_val

    def set_current_val(self, val: float):
        """Request a new voltage; applied on the next DAC_move()."""
        self._target_val = val

    def get_fast_sequence(self):
        """Return whether this channel participates in the fast sequence."""
        return self._fast_sequence

    def set_fast_sequence(self, val: bool):
        """(De)register the channel for the fast sequence."""
        self._fast_sequence = val
        # The parent must rebuild its fast-sequence tasks.
        self.instrument._fs_ready = False

    def get_fast_sequence_delta(self):
        """Return the fast-sequence excursion in volts."""
        return self._fast_sequence_delta

    def set_fast_sequence_delta(self, val: float):
        """Set the fast-sequence excursion in volts."""
        self._fast_sequence_delta = val
        # The parent must rebuild its fast-sequence tasks.
        self.instrument._fs_ready = False
class NI6733(Instrument):
    def __init__(self, name: str, device_name: str = 'PXI2',
                 slots: List[int] = None, ms2wait: float = 2.0,
                 fast_sequence_divider: float = 2.0, fs_pts: int = 101,
                 **kwargs):
        """
        This is the qcodes driver for NI6733 16 bit Analog Output.

        Args:
            name (str): Given name of the DAC
            device_name (str): Name of the PXI device. Default value is 'PXI2'.
            slots(List[int]): List of DAC slots. Each slot has 8 DAC channels.
                Defaults to [3, 4]. (None is used as the default to avoid
                the mutable-default-argument pitfall.)
            ms2wait (float): Wait time between minimum resolution DAC movement in [ms].
            fast_sequence_divider (float): Time between fast sequence movement in [ms].
            fs_pts (int): Length of the fast sequence.
        """
        super().__init__(name, **kwargs)
        if slots is None:
            slots = [3, 4]
        self.device_name = device_name
        self.slots = slots
        self._ms2wait = ms2wait
        self._fast_sequence_divider = fast_sequence_divider
        self._fs_pts = fs_pts
        self._fs_ready = False
        self._fast_move_slot_list = list()
        self._fast_move_channel_list = dict()
        self._fast_move_list = dict()
        self._move_points = None
        # Task bookkeeping convention (kept from the original design):
        # the integer key holds the nidaqmx.Task object, while the string
        # key ('3', '4', ...) is a flag that is True once that task has
        # been closed and must be re-created before reuse.
        self.write_task = dict()
        self.fast_seq_task = dict()
        for slot in self.slots:
            self.write_task[slot] = nidaqmx.Task()
            self.write_task['{:d}'.format(slot)] = False
            self.fast_seq_task[slot] = nidaqmx.Task()
            self.fast_seq_task['{:d}'.format(slot)] = False
        self.ctr_task = nidaqmx.Task()
        self.ctr_task_isClosed = False
        self.do_task = nidaqmx.Task()
        self.do_task_isClosed = False
        self.add_parameter('ms2wait',
                           label='ms to wait',
                           unit='ms',
                           get_cmd=self.get_ms2wait,
                           set_cmd=self.set_ms2wait,
                           vals=vals.Numbers(0.0, 100.0))
        self.add_parameter('fs_div',
                           label='fast sequence divider',
                           unit='ms',
                           get_cmd=self.get_fast_sequence_divider,
                           set_cmd=self.set_fast_sequence_divider,
                           vals=vals.Numbers(0.0, 100.0))
        self.add_parameter('fs_pts',
                           label='fast sequence size',
                           unit='pts',
                           get_cmd=self.get_fs_pts,
                           set_cmd=self.set_fs_pts,
                           vals=vals.Ints(2, 100000))
        ######################
        # Add channels to the instrument
        for slot in self.slots:
            for i in range(8):
                chan = NI6733_ao_voltage_channel(self,
                                                 'analog_output_s{:d}c{:d}'.format(slot, i),
                                                 slot_num=slot,
                                                 channum=i)
                self.add_submodule('s{:d}c{:d}'.format(slot, i), chan)

    ###########################
    # Function for parameters
    ###########################
    def get_ms2wait(self):
        """Return the wait time per minimum-resolution DAC step in ms."""
        return self._ms2wait

    def set_ms2wait(self, val: float):
        """Set the wait time per minimum-resolution DAC step in ms."""
        self._ms2wait = val

    def get_fast_sequence_divider(self):
        """Return the time between fast-sequence steps in ms."""
        return self._fast_sequence_divider

    def set_fast_sequence_divider(self, val: float):
        """Set the time between fast-sequence steps; invalidates the prepared sequence."""
        self._fast_sequence_divider = val
        self._fs_ready = False

    def get_fs_pts(self):
        """Return the number of points in the fast sequence."""
        return self._fs_pts

    def set_fs_pts(self, val: int):
        """Set the number of fast-sequence points; invalidates the prepared sequence."""
        self._fs_pts = val
        self._fs_ready = False

    ###########################
    # Utility functions
    ###########################
    def move_all_dac(self, v: float = 0.0):
        """
        Move all the dac to the given value.
        Scaling factor for each dac is not applied in this operation.

        Parameters
        ----------
        v : float, optional
            Target voltage in volt. The default is 0.0.

        Returns
        -------
        None.
        """
        for s in self.slots:
            for i in range(8):
                chan = getattr(self, 's{:d}c{:d}'.format(s, i))
                chan._target_val = v
        self.DAC_move()

    def init2zero(self):
        """
        Initialise all the DAC values to be 0.0 V after moving once to -10 mV.
        """
        # Bug fix: the previous code called `self.move_all_dac()(-0.01)`,
        # i.e. it tried to call the None returned by move_all_dac().
        self.move_all_dac(-0.01)
        self.move_all_dac(0.0)

    def load_current_values_from_database(self,
                                          db_path: str = './experiments.db',
                                          run_id: int = None,
                                          ):
        """
        Load current DAC values from the specified database and run_id.
        If run_id is not given, we load from the latest run_id.

        Args:
            db_path (str): Path to the database.
            run_id (int): run_id of the recovered run.
        """
        # Connect to the database
        conn = connect(db_path)
        if run_id == None:
            # Get last run id
            run_id = get_last_run(conn)
        # Load dataset
        dataset = load_by_id(run_id)
        # Whether return to initial sweep position after the measurment or not
        return2initial = dataset.snapshot['station']['instruments']['measurement_information']['parameters']['return2initial']['value']
        # Collect information from sweeping parameters
        data = dataset.get_parameter_data()
        data_dict = dict()
        for key in data.keys():
            d = data[key]
            for k in d.keys():
                if not k in data_dict.keys():
                    data_dict[k] = d[k]
                # Check whether measurement was complelted or not from data size
                ar_size = d[k].size
        fast_sweep = dataset.snapshot['station']['instruments']['measurement_information']['parameters']['fast_sweep']['value']
        sweep_dims = dataset.snapshot['station']['instruments']['measurement_information']['parameters']['sweep_dims']['value']
        if fast_sweep:
            first_dim_size = dataset.snapshot['station']['instruments'][self.name]['parameters']['fs_pts']['value']
        else:
            first_dim_size = 1
        total_pts = int(functools.reduce(operator.mul, sweep_dims, 1) * first_dim_size)
        if not ar_size == total_pts:
            completed = False
        else:
            completed = True
        # Set current value of each dac from static values
        for sm in dataset.snapshot['station']['instruments'][self.name]['submodules'].keys():
            # Get raw value of each dac
            cv = dataset.snapshot['station']['instruments'][self.name]['submodules'][sm]['parameters']['cv']['raw_value']
            chan = getattr(self, sm)
            sm_fullname = dataset.snapshot['station']['instruments'][self.name]['submodules'][sm]['parameters']['cv']['full_name']
            if sm_fullname in data_dict.keys():
                # If the sweep completed and returns to the initial point,
                # recover the first swept value, otherwise the last one.
                if return2initial and completed:
                    cv = data_dict[sm_fullname][0]
                else:
                    cv = data_dict[sm_fullname][-1]
            chan._current_val = cv
        conn.close()

    def init_tasks(self):
        """
        Close all the task, which is opend. Then open it again.
        """
        # Bug fix: task re-creation and flag reset are now unconditional
        # (they used to happen only when the task was still open, so a
        # previously-closed task was never re-created). This matches the
        # pattern used in prepare_fast_move().
        if not self.do_task_isClosed:
            self.do_task.close()
        self.do_task = nidaqmx.Task()
        self.do_task_isClosed = False
        if not self.ctr_task_isClosed:
            self.ctr_task.close()
        self.ctr_task = nidaqmx.Task()
        self.ctr_task_isClosed = False
        for slot in self.slots:
            if not self.write_task['{:d}'.format(slot)]:
                self.write_task[slot].close()
            self.write_task[slot] = nidaqmx.Task()
            self.write_task['{:d}'.format(slot)] = False
            if not self.fast_seq_task['{:d}'.format(slot)]:
                self.fast_seq_task[slot].close()
            self.fast_seq_task[slot] = nidaqmx.Task()
            self.fast_seq_task['{:d}'.format(slot)] = False

    ###################################
    # Base functions for voltage output
    ###################################
    def ctr_setup(self,
                  task: nidaqmx.Task = None,
                  slot_num: int = 3,
                  no_of_samples: int = None,
                  trigger_delay: int = 0.0,
                  ):
        """
        This function setup a counter output for the counter 0 for the given slot.

        Args:
            task(nidaqmx.Task): Task counter is set.
            slot_num(int): Slot number of the trigger out
            no_of_samples (int): Number of trigger generated. If it is None, a trigger is generated continuously.
            trigger_delay (int): Delay of the counter in seconds.
        """
        # Create counter output channel
        task.co_channels.add_co_pulse_chan_freq('{}Slot{:d}/ctr0'.format(self.device_name, slot_num),
                                                units=nidaqmx.constants.FrequencyUnits.HZ,
                                                idle_state=nidaqmx.constants.Level.LOW,
                                                initial_delay=trigger_delay,
                                                freq=1000.0/self._fast_sequence_divider,
                                                duty_cycle=0.5,
                                                )
        # Set sample generation mode and number of samples to be generated.
        # Comment: Incrase 'samps_per_chan' by 3 since some trigger is missed by analog output.
        task.timing.cfg_implicit_timing(samps_per_chan=no_of_samples+3,
                                        sample_mode=nidaqmx.constants.AcquisitionType.FINITE)

    def do_setup(self,
                 task: nidaqmx.Task = None,
                 slot_num: int = 3,
                 port_num: int = 0,
                 line_num: int = 0,
                 initial_delay: int = 1,
                 trigger_length: int = 2,
                 sample_clk_src: str = '/PXI2Slot3/Ctr0InternalOutput',
                 ):
        """
        This function setup digital output task used to trigger ADC.

        Parameters
        ----------
        task : nidaqmx.Task, optional
            task, where the digital output channel is set.
        slot_num : int, optional
            Slot number. The default is 3.
        port_num : int, optional
            Port number of digital output. The default is 0.
        line_num : int, optional
            Line number of digital output. The default is 0.
        initial_delay : int, optional
            Initial delay of the generated start trigger in a unit of a clock. The default is 1.
        trigger_length : int, optional
            Length of the trigger in a unit of a clock sample. The default is 2.
        sample_clk_src : str, optional
            Sample clock source. The default is '/PXI2Slot3/Ctr0InternalOutput'.

        Returns
        -------
        None.
        """
        # Calculate number of points for the trigger
        points = initial_delay + trigger_length + 10
        # Create digital output channel
        task.do_channels.add_do_chan(lines='{}Slot{:d}/port{:d}/line{:d}'.format(self.device_name, slot_num, port_num, line_num))
        # Setup timing
        task.timing.cfg_samp_clk_timing(rate=100000,
                                        source=sample_clk_src,
                                        active_edge=nidaqmx.constants.Edge.RISING,
                                        sample_mode=nidaqmx.constants.AcquisitionType.FINITE,
                                        samps_per_chan=points
                                        )
        # Write array information of the pulse
        writer = DigitalSingleChannelWriter(task.out_stream)
        ar = np.zeros((points,), dtype=np.uint8)
        ar[initial_delay:initial_delay+trigger_length] = 2 ** line_num
        writer.write_many_sample_port_byte(ar)

    def set_sample_clock(self,
                         task: nidaqmx.Task = None,
                         no_of_samples: int = None,
                         sample_rate: float = 500.0,
                         sample_clk_src: str = None,
                         ):
        """
        This function setup the sample clock timing.

        Parameters
        ----------
        task : nidaqmx.Task, optional
            task, where the sample clock to be set.
        no_of_samples : int, optional
            Number of samples (data points) to be generated. If it is None, clock mode becomes
            continuous.
        sample_rate : float, optional
            Sampling rate in Hz. The default is 500.0 Hz.
        sample_clk_src : str, optional
            Sample clock source. We can set extra source. If it is None,
            we use a default onboard clock.

        Returns
        -------
        None.
        """
        if sample_clk_src == None:
            sample_clk_src = 'OnboardClock'
        task.timing.cfg_samp_clk_timing(sample_rate,
                                        source=sample_clk_src,
                                        active_edge=nidaqmx.constants.Edge.RISING,
                                        sample_mode=nidaqmx.constants.AcquisitionType.FINITE,
                                        samps_per_chan=no_of_samples)

    def DAC_move(self,
                 task_preparation: bool = True,
                 clear_task: bool = True):
        """
        This function moves the DAC values, whose target value is changed.

        Args:
            task_preparation (bool): Whether prepare analog output and sample clock to the task.
            clear_task (bool): Whether we clear the task after the movement or not.
        """
        move_slot_list = list()
        move_channel_list = dict()
        move_list = dict()
        largest_move = 0.0
        for slot in self.slots:
            move_channel_list[slot] = list()
            move_list[slot] = list()
            for i in range(8):
                chan = getattr(self, 's{:d}c{:d}'.format(slot, i))
                if not chan._target_val == None:
                    move_channel_list[slot].append(chan)
                    move_slot_list.append(slot)
                    cv = chan._current_val   # Current DAC value
                    tv = chan._target_val    # Target DAC value
                    move_list[slot].append((cv, tv))  # Keep the value
                    delta = abs(tv - cv)     # Size of the movement
                    if delta > largest_move:
                        # Check largest movement to determine number of points.
                        largest_move = delta
        # Convert move_slot_list to set
        move_slot_list = set(move_slot_list)
        # Calculate points (one 16-bit LSB = 20 V / 2**16; keep it even)
        points = max(2, int((largest_move/(20/2.0**16)//2.0)*2.0))
        # Keep points and re-define task when it changes
        if not self._move_points == points:
            self._move_points = points
            task_preparation = True
        # Create array for movement
        ar = dict()
        for slot in move_slot_list:
            ar_list = list()
            for v in move_list[slot]:
                ar_list.append(np.linspace(v[0], v[1], self._move_points, dtype=float))
            ar[slot] = np.vstack(tuple(ar_list))
        if task_preparation:
            # Clear task (It takes a few ms.)
            for slot in move_slot_list:
                # Bug fix: re-create the task unconditionally. Previously
                # the re-creation was nested inside the "still open" check,
                # so a task already closed by clear_task was never
                # re-created and adding channels to it failed.
                if not self.write_task['{:d}'.format(slot)]:
                    self.write_task[slot].close()
                self.write_task[slot] = nidaqmx.Task()
                self.write_task['{:d}'.format(slot)] = False
                # Create analog output channel in the task
                for chan in move_channel_list[slot]:
                    self.write_task[slot].ao_channels.add_ao_voltage_chan(physical_channel='{}Slot{:d}/ao{:d}'.format(self.device_name, chan.slot_num, chan.channum),
                                                                          min_val=chan.min_val(),
                                                                          max_val=chan.max_val(),
                                                                          units=nidaqmx.constants.VoltageUnits.VOLTS)
                # Setup sample clock
                self.set_sample_clock(task=self.write_task[slot],
                                      no_of_samples=self._move_points,
                                      sample_rate=1000.0/self.ms2wait(),
                                      sample_clk_src=None,)
        writer = dict()
        for slot in move_slot_list:
            # Output voltage
            writer[slot] = AnalogMultiChannelWriter(self.write_task[slot].out_stream)
            writer[slot].write_many_sample(ar[slot])
        for slot in move_slot_list:
            self.write_task[slot].start()
        for slot in move_slot_list:
            self.write_task[slot].wait_until_done(timeout=nidaqmx.constants.WAIT_INFINITELY)
            self.write_task[slot].stop()
        if clear_task:
            # Clear task (It takes a few ms.)
            for slot in move_slot_list:
                self.write_task[slot].close()
                self.write_task['{:d}'.format(slot)] = True
        # Update information for the moved channels
        for slot in move_slot_list:
            for chan in move_channel_list[slot]:
                chan._current_val = chan._target_val
                chan._target_val = None

    def prepare_fast_move(self):
        """
        This function prepare the task for fast movement.
        """
        self._fast_move_slot_list = list()
        self._fast_move_channel_list = dict()
        self._fast_move_list = dict()
        for slot in self.slots:
            self._fast_move_channel_list[slot] = list()
            self._fast_move_list[slot] = list()
            for i in range(8):
                chan = getattr(self, 's{:d}c{:d}'.format(slot, i))
                if chan.fs():
                    self._fast_move_slot_list.append(slot)
                    self._fast_move_channel_list[slot].append(chan)
                    v0 = chan._current_val
                    v1 = chan._current_val + chan._fast_sequence_delta
                    self._fast_move_list[slot].append((v0, v1))
        # Convert fast_move_slot_list to set.
        self._fast_move_slot_list = set(self._fast_move_slot_list)
        # Clear the counter task
        if not self.ctr_task_isClosed:
            self.ctr_task.close()
        self.ctr_task = nidaqmx.Task()
        self.ctr_task_isClosed = False
        # Setup counter
        self.ctr_setup(task=self.ctr_task,
                       slot_num=self.slots[0],
                       no_of_samples=self.fs_pts(),
                       trigger_delay=0.0,
                       )
        # Clear the digital out task
        if not self.do_task_isClosed:
            self.do_task.close()
        self.do_task = nidaqmx.Task()
        self.do_task_isClosed = False
        # Setup digital output
        self.do_setup(task=self.do_task,
                      slot_num=self.slots[0],
                      port_num=0,
                      line_num=0,
                      initial_delay=0,
                      trigger_length=1,
                      sample_clk_src='/{}Slot{:d}/Ctr0InternalOutput'.format(self.device_name, self.slots[0]),
                      )
        self._fs_ready = True

    def DAC_fast_move(self):
        """
        This function makes fast sequence of the DAC.
        --> This function gets a problem when we use in a QuCoDeS. It is not possible
        to use DAC_move task and DAC_fast move task at the same time.
        """
        if not self._fs_ready:
            raise ValueError('Fase sequence is not ready. Please perform "prepare_fast_move".')
        # Number of array points has to be even. I adjust for that.
        if int(self.fs_pts() % 2) == 0:
            points = self.fs_pts()+1
        else:
            points = self.fs_pts()
        # Set up channels
        for slot in self._fast_move_slot_list:
            # Define fast sequence task.
            # Bug fix: re-create the task unconditionally (see DAC_move);
            # after a previous DAC_fast_move the task is closed and its
            # flag is True, so the old code never re-created it.
            if not self.fast_seq_task['{:d}'.format(slot)]:
                self.fast_seq_task[slot].close()
            self.fast_seq_task[slot] = nidaqmx.Task()
            self.fast_seq_task['{:d}'.format(slot)] = False
            # Create analog output channel in the task
            for chan in self._fast_move_channel_list[slot]:
                self.fast_seq_task[slot].ao_channels.add_ao_voltage_chan(physical_channel='{}Slot{:d}/ao{:d}'.format(self.device_name, chan.slot_num, chan.channum),
                                                                         min_val=chan.min_val(),
                                                                         max_val=chan.max_val(),
                                                                         units=nidaqmx.constants.VoltageUnits.VOLTS)
            # Setup sample clock
            self.set_sample_clock(task=self.fast_seq_task[slot],
                                  no_of_samples=points+1,
                                  sample_rate=1000.0/self._fast_sequence_divider,
                                  sample_clk_src='/{}Slot{:d}/Ctr0InternalOutput'.format(self.device_name, self.slots[0]),)
        ar_dict = dict()
        writer = dict()
        for slot in self._fast_move_slot_list:
            # Create array for fast movement
            ar_list = list()
            for chan in self._fast_move_channel_list[slot]:
                v0 = chan._current_val
                v1 = chan._current_val + chan._fast_sequence_delta
                ar = np.empty((points+1,), dtype=float)
                ar[0:self.fs_pts()] = np.linspace(v0, v1, self.fs_pts(), dtype=float)
                # Return to the starting value at the end of the sequence.
                ar[self.fs_pts()] = v0
                if int(self.fs_pts() % 2) == 0:
                    ar[self.fs_pts()+1] = v0
                ar_list.append(ar)
            ar_dict[slot] = np.vstack(tuple(ar_list))
            # Output voltage
            writer[slot] = AnalogMultiChannelWriter(self.fast_seq_task[slot].out_stream)
            writer[slot].write_many_sample(ar_dict[slot])
        for slot in self._fast_move_slot_list:
            self.fast_seq_task[slot].start()
        self.do_task.start()
        self.ctr_task.start()
        for slot in self._fast_move_slot_list:
            self.fast_seq_task[slot].wait_until_done(timeout=nidaqmx.constants.WAIT_INFINITELY)
            self.fast_seq_task[slot].stop()
            self.fast_seq_task[slot].close()
            self.fast_seq_task['{:d}'.format(slot)] = True
        self.do_task.wait_until_done(timeout=nidaqmx.constants.WAIT_INFINITELY)
        self.do_task.stop()
        self.ctr_task.wait_until_done(timeout=nidaqmx.constants.WAIT_INFINITELY)
        self.ctr_task.stop()
if __name__ == '__main__':
    # Smoke test: instantiate the driver and run one fast sequence on
    # slot 3 / channel 0. Requires the physical PXI hardware; the
    # commented sections below are alternative manual tests.
    t = time.time()
    dac = NI6733(name = 'dac',
                 device_name = 'PXI2',
                 slots=[3,4,],
                 ms2wait = 2.0,
                 fast_sequence_divider = 2.0,
                 fs_pts = 201,
                 )
    # # DAC movement test
    # dac.s3c0.cv(-0.1)
    # dac.s4c0.cv(-0.3)
    # dac.DAC_move(task_preparation = True,
    #              clear_task = False)
    # dac.s3c0.cv(-0.3)
    # dac.s4c0.cv(-0.5)
    # dac.DAC_move(task_preparation = False,
    #              clear_task = False)
    # dac.s3c0.cv(-0.5)
    # dac.s4c0.cv(-0.7)
    # dac.DAC_move(task_preparation = False,
    #              clear_task = False)
    # dac.s3c0.cv(0.0)
    # dac.s4c0.cv(0.0)
    # dac.DAC_move(task_preparation = False,
    #              clear_task = True)
    # # Trigger test
    # dac.ctr_setup(slot_num = 3,
    #               no_of_samples = 20,
    #               trigger_delay = 0.1)
    # dac.ctr_task.start()
    # dac.ctr_task.wait_until_done()
    # # time.sleep(5)
    # dac.ctr_task.stop()
    # Fast sequence test
    dac.fs_pts(201)
    dac.fs_div(2.0)
    dac.s3c0.fs(True)
    dac.s3c0.fs_delta(-1.0)
    dac.prepare_fast_move()
    dac.DAC_fast_move()
print('Execution time {:f}'.format(time.time() - t)) | [
"logging.getLogger",
"qcodes.dataset.sqlite.queries.get_last_run",
"functools.reduce",
"nidaqmx.Task",
"qcodes.dataset.sqlite.database.connect",
"qcodes.validators.Numbers",
"numpy.zeros",
"numpy.linspace",
"qcodes.validators.Ints",
"numpy.empty",
"qcodes.dataset.data_set.load_by_id",
"nidaqmx... | [((664, 691), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (681, 691), False, 'import logging\n'), ((29407, 29418), 'time.time', 'time.time', ([], {}), '()\n', (29416, 29418), False, 'import time\n'), ((7386, 7400), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (7398, 7400), False, 'import nidaqmx\n'), ((7463, 7477), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (7475, 7477), False, 'import nidaqmx\n'), ((11022, 11038), 'qcodes.dataset.sqlite.database.connect', 'connect', (['db_path'], {}), '(db_path)\n', (11029, 11038), False, 'from qcodes.dataset.sqlite.database import connect\n'), ((11190, 11208), 'qcodes.dataset.data_set.load_by_id', 'load_by_id', (['run_id'], {}), '(run_id)\n', (11200, 11208), False, 'from qcodes.dataset.data_set import load_by_id\n'), ((17911, 17954), 'nidaqmx.stream_writers.DigitalSingleChannelWriter', 'DigitalSingleChannelWriter', (['task.out_stream'], {}), '(task.out_stream)\n', (17937, 17954), False, 'from nidaqmx.stream_writers import DigitalSingleChannelWriter, AnalogMultiChannelWriter\n'), ((17968, 18003), 'numpy.zeros', 'np.zeros', (['(points,)'], {'dtype': 'np.uint8'}), '((points,), dtype=np.uint8)\n', (17976, 18003), True, 'import numpy as np\n'), ((24912, 24926), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (24924, 24926), False, 'import nidaqmx\n'), ((25347, 25361), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (25359, 25361), False, 'import nidaqmx\n'), ((7176, 7190), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (7188, 7190), False, 'import nidaqmx\n'), ((7287, 7301), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (7299, 7301), False, 'import nidaqmx\n'), ((11117, 11135), 'qcodes.dataset.sqlite.queries.get_last_run', 'get_last_run', (['conn'], {}), '(conn)\n', (11129, 11135), False, 'from qcodes.dataset.sqlite.queries import get_last_run\n'), ((13515, 13529), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (13527, 13529), False, 'import nidaqmx\n'), 
((13644, 13658), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (13656, 13658), False, 'import nidaqmx\n'), ((22939, 22997), 'nidaqmx.stream_writers.AnalogMultiChannelWriter', 'AnalogMultiChannelWriter', (['self.write_task[slot].out_stream'], {}), '(self.write_task[slot].out_stream)\n', (22963, 22997), False, 'from nidaqmx.stream_writers import DigitalSingleChannelWriter, AnalogMultiChannelWriter\n'), ((26728, 26742), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (26740, 26742), False, 'import nidaqmx\n'), ((28549, 28610), 'nidaqmx.stream_writers.AnalogMultiChannelWriter', 'AnalogMultiChannelWriter', (['self.fast_seq_task[slot].out_stream'], {}), '(self.fast_seq_task[slot].out_stream)\n', (28573, 28610), False, 'from nidaqmx.stream_writers import DigitalSingleChannelWriter, AnalogMultiChannelWriter\n'), ((3624, 3649), 'qcodes.validators.Numbers', 'vals.Numbers', (['(-10.0)', '(10.0)'], {}), '(-10.0, 10.0)\n', (3636, 3649), True, 'from qcodes import Instrument, VisaInstrument, validators as vals\n'), ((3957, 3982), 'qcodes.validators.Numbers', 'vals.Numbers', (['(-10.0)', '(10.0)'], {}), '(-10.0, 10.0)\n', (3969, 3982), True, 'from qcodes import Instrument, VisaInstrument, validators as vals\n'), ((4293, 4316), 'qcodes.validators.Numbers', 'vals.Numbers', (['(-5.0)', '(5.0)'], {}), '(-5.0, 5.0)\n', (4305, 4316), True, 'from qcodes import Instrument, VisaInstrument, validators as vals\n'), ((4902, 4925), 'qcodes.validators.Numbers', 'vals.Numbers', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (4914, 4925), True, 'from qcodes import Instrument, VisaInstrument, validators as vals\n'), ((7796, 7820), 'qcodes.validators.Numbers', 'vals.Numbers', (['(0.0)', '(100.0)'], {}), '(0.0, 100.0)\n', (7808, 7820), True, 'from qcodes import Instrument, VisaInstrument, validators as vals\n'), ((8140, 8164), 'qcodes.validators.Numbers', 'vals.Numbers', (['(0.0)', '(100.0)'], {}), '(0.0, 100.0)\n', (8152, 8164), True, 'from qcodes import Instrument, VisaInstrument, validators as 
vals\n'), ((8452, 8472), 'qcodes.validators.Ints', 'vals.Ints', (['(2)', '(100000)'], {}), '(2, 100000)\n', (8461, 8472), True, 'from qcodes import Instrument, VisaInstrument, validators as vals\n'), ((12299, 12344), 'functools.reduce', 'functools.reduce', (['operator.mul', 'sweep_dims', '(1)'], {}), '(operator.mul, sweep_dims, 1)\n', (12315, 12344), False, 'import functools\n'), ((13847, 13861), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (13859, 13861), False, 'import nidaqmx\n'), ((14031, 14045), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (14043, 14045), False, 'import nidaqmx\n'), ((21828, 21842), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (21840, 21842), False, 'import nidaqmx\n'), ((28140, 28176), 'numpy.empty', 'np.empty', (['(points + 1,)'], {'dtype': 'float'}), '((points + 1,), dtype=float)\n', (28148, 28176), True, 'import numpy as np\n'), ((30680, 30691), 'time.time', 'time.time', ([], {}), '()\n', (30689, 30691), False, 'import time\n'), ((21449, 21504), 'numpy.linspace', 'np.linspace', (['v[0]', 'v[1]', 'self._move_points'], {'dtype': 'float'}), '(v[0], v[1], self._move_points, dtype=float)\n', (21460, 21504), True, 'import numpy as np\n')] |
from django.urls import path
from . import views
app_name = 'v2_trip'
urlpatterns = [
path('', views.main, name='main'),
path('<int:pk>/', views.item_form, name='item_form'),
path('persons/', views.go_persons, name='go_persons'),
path('trips/', views.go_trips, name='go_trips'),
path('entity/<str:name>/<int:pk>/', views.trip_entity, name = 'trip_entity'),
]
| [
"django.urls.path"
] | [((91, 124), 'django.urls.path', 'path', (['""""""', 'views.main'], {'name': '"""main"""'}), "('', views.main, name='main')\n", (95, 124), False, 'from django.urls import path\n'), ((145, 197), 'django.urls.path', 'path', (['"""<int:pk>/"""', 'views.item_form'], {'name': '"""item_form"""'}), "('<int:pk>/', views.item_form, name='item_form')\n", (149, 197), False, 'from django.urls import path\n'), ((204, 257), 'django.urls.path', 'path', (['"""persons/"""', 'views.go_persons'], {'name': '"""go_persons"""'}), "('persons/', views.go_persons, name='go_persons')\n", (208, 257), False, 'from django.urls import path\n'), ((264, 311), 'django.urls.path', 'path', (['"""trips/"""', 'views.go_trips'], {'name': '"""go_trips"""'}), "('trips/', views.go_trips, name='go_trips')\n", (268, 311), False, 'from django.urls import path\n'), ((322, 396), 'django.urls.path', 'path', (['"""entity/<str:name>/<int:pk>/"""', 'views.trip_entity'], {'name': '"""trip_entity"""'}), "('entity/<str:name>/<int:pk>/', views.trip_entity, name='trip_entity')\n", (326, 396), False, 'from django.urls import path\n')] |
from dataclasses import dataclass
from my_dataclasses.member import Member
from my_dataclasses.sport import Sport
@dataclass(order=True, frozen=True)
class Plays(object):
member: Member
sport: Sport
| [
"dataclasses.dataclass"
] | [((117, 151), 'dataclasses.dataclass', 'dataclass', ([], {'order': '(True)', 'frozen': '(True)'}), '(order=True, frozen=True)\n', (126, 151), False, 'from dataclasses import dataclass\n')] |
from flask_mail import Mail
from singletons.app import _app
mail = Mail(_app)
| [
"flask_mail.Mail"
] | [((68, 78), 'flask_mail.Mail', 'Mail', (['_app'], {}), '(_app)\n', (72, 78), False, 'from flask_mail import Mail\n')] |
#!/usr/bin/env python3
import sys, json, argparse
parser = argparse.ArgumentParser()
parser.add_argument("--empty_error", action="store_true", help="If present, do not print error message")
args = parser.parse_args()
data=json.load(sys.stdin)
if 'results' in data:
for result in data['results']:
if 'series' in result:
print(result['series'][0]['columns'][1] + ': ' + str(result['series'][0]['values'][0][1]))
elif not args.empty_error:
print("An expected result from CURL request is missing")
elif not args.empty_error:
print("No results returned from CURL request")
| [
"json.load",
"argparse.ArgumentParser"
] | [((60, 85), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (83, 85), False, 'import sys, json, argparse\n'), ((224, 244), 'json.load', 'json.load', (['sys.stdin'], {}), '(sys.stdin)\n', (233, 244), False, 'import sys, json, argparse\n')] |
# Copyright 2020 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing flags applicable across benchmark run on IBM Cloud."""
from absl import flags
flags.DEFINE_string('ibmcloud_azone', None,
'IBMCloud internal DC name')
flags.DEFINE_integer('ibmcloud_volume_iops', 20000,
'Desired volume IOPS.')
flags.DEFINE_integer('ibmcloud_volume_bandwidth', None,
'Desired volume bandwidth in Mbps.')
flags.DEFINE_boolean('ibmcloud_volume_encrypted', False,
'Enable encryption on volume creates.')
flags.DEFINE_string('ibmcloud_image_username', 'root',
'Ssh username for cloud image.')
flags.DEFINE_integer('ibmcloud_polling_delay', 2,
'Delay between polling attempts in seconds.')
flags.DEFINE_integer('ibmcloud_timeout', 600,
'timeout in secs.')
flags.DEFINE_integer('ibmcloud_boot_disk_size', 10,
'boot volume disk size.')
flags.DEFINE_boolean('ibmcloud_debug', False,
'debug flag.')
flags.DEFINE_boolean('ibmcloud_resources_keep', False,
'keep resources.')
flags.DEFINE_string('ibmcloud_volume_profile', 'custom',
'volume profile')
flags.DEFINE_string('ibmcloud_bootvol_encryption_key', None,
'boot volume encryption key crn')
flags.DEFINE_string('ibmcloud_datavol_encryption_key', None,
'data volume encryption key crn')
flags.DEFINE_string('ibmcloud_vpcid', None,
'IBM Cloud vpc id')
flags.DEFINE_string('ibmcloud_subnet', None,
'primary subnet id')
flags.DEFINE_string('ibmcloud_networks', None,
'additional network ids, comma separated')
flags.DEFINE_string('ibmcloud_prefix', 'perfkit',
'resource name prefix')
flags.DEFINE_string('ibmcloud_rgid', None,
'Resource Group id for the account.')
flags.DEFINE_integer('ibmcloud_boot_volume_iops', 3000,
'boot voume iops')
flags.DEFINE_integer('ibmcloud_boot_volume_size', 0,
'boot voume size in GB')
flags.DEFINE_string('ibmcloud_pub_keyid', None,
'rias public sshkey id')
flags.DEFINE_integer('ibmcloud_network_mtu', 9000,
'MTU size on network interfaces.')
flags.DEFINE_integer('ibmcloud_subnets_extra', 0,
'extra subnets to lookup')
flags.DEFINE_integer('ibmcloud_vdisks_extra', 0,
'extra disks to create')
flags.DEFINE_string('ibmcloud_image_info', None,
'image info in json formatted file')
flags.DEFINE_boolean('ibmcloud_encrypted_image', False,
'encrypted image.')
| [
"absl.flags.DEFINE_integer",
"absl.flags.DEFINE_string",
"absl.flags.DEFINE_boolean"
] | [((711, 783), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""ibmcloud_azone"""', 'None', '"""IBMCloud internal DC name"""'], {}), "('ibmcloud_azone', None, 'IBMCloud internal DC name')\n", (730, 783), False, 'from absl import flags\n'), ((805, 880), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""ibmcloud_volume_iops"""', '(20000)', '"""Desired volume IOPS."""'], {}), "('ibmcloud_volume_iops', 20000, 'Desired volume IOPS.')\n", (825, 880), False, 'from absl import flags\n'), ((903, 999), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""ibmcloud_volume_bandwidth"""', 'None', '"""Desired volume bandwidth in Mbps."""'], {}), "('ibmcloud_volume_bandwidth', None,\n 'Desired volume bandwidth in Mbps.')\n", (923, 999), False, 'from absl import flags\n'), ((1018, 1118), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""ibmcloud_volume_encrypted"""', '(False)', '"""Enable encryption on volume creates."""'], {}), "('ibmcloud_volume_encrypted', False,\n 'Enable encryption on volume creates.')\n", (1038, 1118), False, 'from absl import flags\n'), ((1137, 1228), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""ibmcloud_image_username"""', '"""root"""', '"""Ssh username for cloud image."""'], {}), "('ibmcloud_image_username', 'root',\n 'Ssh username for cloud image.')\n", (1156, 1228), False, 'from absl import flags\n'), ((1246, 1345), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""ibmcloud_polling_delay"""', '(2)', '"""Delay between polling attempts in seconds."""'], {}), "('ibmcloud_polling_delay', 2,\n 'Delay between polling attempts in seconds.')\n", (1266, 1345), False, 'from absl import flags\n'), ((1364, 1429), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""ibmcloud_timeout"""', '(600)', '"""timeout in secs."""'], {}), "('ibmcloud_timeout', 600, 'timeout in secs.')\n", (1384, 1429), False, 'from absl import flags\n'), ((1452, 1529), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', 
(['"""ibmcloud_boot_disk_size"""', '(10)', '"""boot volume disk size."""'], {}), "('ibmcloud_boot_disk_size', 10, 'boot volume disk size.')\n", (1472, 1529), False, 'from absl import flags\n'), ((1552, 1612), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""ibmcloud_debug"""', '(False)', '"""debug flag."""'], {}), "('ibmcloud_debug', False, 'debug flag.')\n", (1572, 1612), False, 'from absl import flags\n'), ((1635, 1708), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""ibmcloud_resources_keep"""', '(False)', '"""keep resources."""'], {}), "('ibmcloud_resources_keep', False, 'keep resources.')\n", (1655, 1708), False, 'from absl import flags\n'), ((1731, 1805), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""ibmcloud_volume_profile"""', '"""custom"""', '"""volume profile"""'], {}), "('ibmcloud_volume_profile', 'custom', 'volume profile')\n", (1750, 1805), False, 'from absl import flags\n'), ((1827, 1925), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""ibmcloud_bootvol_encryption_key"""', 'None', '"""boot volume encryption key crn"""'], {}), "('ibmcloud_bootvol_encryption_key', None,\n 'boot volume encryption key crn')\n", (1846, 1925), False, 'from absl import flags\n'), ((1943, 2041), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""ibmcloud_datavol_encryption_key"""', 'None', '"""data volume encryption key crn"""'], {}), "('ibmcloud_datavol_encryption_key', None,\n 'data volume encryption key crn')\n", (1962, 2041), False, 'from absl import flags\n'), ((2059, 2122), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""ibmcloud_vpcid"""', 'None', '"""IBM Cloud vpc id"""'], {}), "('ibmcloud_vpcid', None, 'IBM Cloud vpc id')\n", (2078, 2122), False, 'from absl import flags\n'), ((2144, 2209), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""ibmcloud_subnet"""', 'None', '"""primary subnet id"""'], {}), "('ibmcloud_subnet', None, 'primary subnet id')\n", (2163, 2209), False, 'from absl import flags\n'), 
((2231, 2324), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""ibmcloud_networks"""', 'None', '"""additional network ids, comma separated"""'], {}), "('ibmcloud_networks', None,\n 'additional network ids, comma separated')\n", (2250, 2324), False, 'from absl import flags\n'), ((2342, 2415), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""ibmcloud_prefix"""', '"""perfkit"""', '"""resource name prefix"""'], {}), "('ibmcloud_prefix', 'perfkit', 'resource name prefix')\n", (2361, 2415), False, 'from absl import flags\n'), ((2437, 2522), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""ibmcloud_rgid"""', 'None', '"""Resource Group id for the account."""'], {}), "('ibmcloud_rgid', None, 'Resource Group id for the account.'\n )\n", (2456, 2522), False, 'from absl import flags\n'), ((2539, 2613), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""ibmcloud_boot_volume_iops"""', '(3000)', '"""boot voume iops"""'], {}), "('ibmcloud_boot_volume_iops', 3000, 'boot voume iops')\n", (2559, 2613), False, 'from absl import flags\n'), ((2636, 2713), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""ibmcloud_boot_volume_size"""', '(0)', '"""boot voume size in GB"""'], {}), "('ibmcloud_boot_volume_size', 0, 'boot voume size in GB')\n", (2656, 2713), False, 'from absl import flags\n'), ((2736, 2808), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""ibmcloud_pub_keyid"""', 'None', '"""rias public sshkey id"""'], {}), "('ibmcloud_pub_keyid', None, 'rias public sshkey id')\n", (2755, 2808), False, 'from absl import flags\n'), ((2830, 2919), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""ibmcloud_network_mtu"""', '(9000)', '"""MTU size on network interfaces."""'], {}), "('ibmcloud_network_mtu', 9000,\n 'MTU size on network interfaces.')\n", (2850, 2919), False, 'from absl import flags\n'), ((2938, 3014), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""ibmcloud_subnets_extra"""', '(0)', '"""extra subnets to 
lookup"""'], {}), "('ibmcloud_subnets_extra', 0, 'extra subnets to lookup')\n", (2958, 3014), False, 'from absl import flags\n'), ((3037, 3110), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""ibmcloud_vdisks_extra"""', '(0)', '"""extra disks to create"""'], {}), "('ibmcloud_vdisks_extra', 0, 'extra disks to create')\n", (3057, 3110), False, 'from absl import flags\n'), ((3133, 3222), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""ibmcloud_image_info"""', 'None', '"""image info in json formatted file"""'], {}), "('ibmcloud_image_info', None,\n 'image info in json formatted file')\n", (3152, 3222), False, 'from absl import flags\n'), ((3240, 3315), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""ibmcloud_encrypted_image"""', '(False)', '"""encrypted image."""'], {}), "('ibmcloud_encrypted_image', False, 'encrypted image.')\n", (3260, 3315), False, 'from absl import flags\n')] |
import os
from utils import relative_path
# Hyperparams
ARCFACE_M = 0.5
ARCFACE_S = 10.
CENTERLOSS_ALPHA = 0.008
CENTERLOSS_LAMBDA = 0.5
EMBEDDING_SIZE = 256
MIN_FACES_PER_PERSON = 5 # Min num of samples per class - or class is removed
MAX_FACES_PER_PERSON = 200 # Max num of samples per class - additional samples are removed
MIN_FACES_UNSAMPLE = 5 # All classes with lower num of samples are upscaled to this num of samples
DEV_FACES_PER_PERSON = 2 # Number of images per person in dev data
BATCH_SIZE = 256
EPOCHS = 50
TARGET_IMG_WIDTH = 96
TARGET_IMG_HEIGHT = 112
MIN_IMG_WIDTH = TARGET_IMG_WIDTH # no image upscale allowed
MIN_IMG_HEIGHT = TARGET_IMG_HEIGHT # no image upscale allowed
INPUT_SHAPE = (TARGET_IMG_HEIGHT, TARGET_IMG_WIDTH, 3)
# Paths
MODEL_SAVE_PATH = os.environ.get('MODEL_SAVE_PATH', relative_path('../model/'))
VGG_TRAIN_PATH = os.environ.get('VGG_DATASET', relative_path('../data/VGGFace2/')) + '/train/'
VGG_TEST_PATH = os.environ.get('VGG_DATASET', relative_path('../data/VGGFace2/')) + '/test/'
VGG_BB_TRAIN_MAP = os.environ.get('BB_TRAIN', relative_path('../data/vggface_bb_landmark/loose_bb_train.csv'))
VGG_BB_TEST_MAP = os.environ.get('BB_TEST', relative_path('../data/vggface_bb_landmark/loose_bb_test.csv'))
CASIA_PATH = os.environ.get('CASIA_DATASET', relative_path('../data/CASIA-WebFace/'))
CASIA_BB_MAP = os.environ.get('CASIA_BB', relative_path('../data/casia_landmark.csv'))
LFW_PATH = os.environ.get('LFW_DATASET', relative_path('../data/lfw/'))
LFW_BB_MAP = os.environ.get('LFW_BB', relative_path('../data/lfw_landmark.csv'))
LFW_PAIRS_PATH = os.environ.get('LFW_PAIRS', relative_path('../data/lfw_pairs.txt'))
| [
"utils.relative_path"
] | [((910, 936), 'utils.relative_path', 'relative_path', (['"""../model/"""'], {}), "('../model/')\n", (923, 936), False, 'from utils import relative_path\n'), ((1204, 1267), 'utils.relative_path', 'relative_path', (['"""../data/vggface_bb_landmark/loose_bb_train.csv"""'], {}), "('../data/vggface_bb_landmark/loose_bb_train.csv')\n", (1217, 1267), False, 'from utils import relative_path\n'), ((1326, 1388), 'utils.relative_path', 'relative_path', (['"""../data/vggface_bb_landmark/loose_bb_test.csv"""'], {}), "('../data/vggface_bb_landmark/loose_bb_test.csv')\n", (1339, 1388), False, 'from utils import relative_path\n'), ((1447, 1486), 'utils.relative_path', 'relative_path', (['"""../data/CASIA-WebFace/"""'], {}), "('../data/CASIA-WebFace/')\n", (1460, 1486), False, 'from utils import relative_path\n'), ((1545, 1588), 'utils.relative_path', 'relative_path', (['"""../data/casia_landmark.csv"""'], {}), "('../data/casia_landmark.csv')\n", (1558, 1588), False, 'from utils import relative_path\n'), ((1647, 1676), 'utils.relative_path', 'relative_path', (['"""../data/lfw/"""'], {}), "('../data/lfw/')\n", (1660, 1676), False, 'from utils import relative_path\n'), ((1735, 1776), 'utils.relative_path', 'relative_path', (['"""../data/lfw_landmark.csv"""'], {}), "('../data/lfw_landmark.csv')\n", (1748, 1776), False, 'from utils import relative_path\n'), ((1835, 1873), 'utils.relative_path', 'relative_path', (['"""../data/lfw_pairs.txt"""'], {}), "('../data/lfw_pairs.txt')\n", (1848, 1873), False, 'from utils import relative_path\n'), ((995, 1029), 'utils.relative_path', 'relative_path', (['"""../data/VGGFace2/"""'], {}), "('../data/VGGFace2/')\n", (1008, 1029), False, 'from utils import relative_path\n'), ((1100, 1134), 'utils.relative_path', 'relative_path', (['"""../data/VGGFace2/"""'], {}), "('../data/VGGFace2/')\n", (1113, 1134), False, 'from utils import relative_path\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from rasa_core.channels import UserMessage
from rasa_core.channels.direct import CollectingOutputChannel
from rasa_core.featurizers import BinaryFeaturizer
from rasa_core.interpreter import RegexInterpreter
from rasa_core.channels.console import ConsoleOutputChannel
from rasa_core.policies import PolicyTrainer
from rasa_core.policies.ensemble import SimplePolicyEnsemble
from rasa_core.policies.scoring_policy import ScoringPolicy
from rasa_core.processor import MessageProcessor
from rasa_core.tracker_store import InMemoryTrackerStore
def test_message_processor(default_domain, capsys):
story_filename = "data/dsl_stories/stories_defaultdomain.md"
ensemble = SimplePolicyEnsemble([ScoringPolicy()])
interpreter = RegexInterpreter()
PolicyTrainer(ensemble, default_domain, BinaryFeaturizer()).train(
story_filename,
max_history=3)
tracker_store = InMemoryTrackerStore(default_domain)
processor = MessageProcessor(interpreter,
ensemble,
default_domain,
tracker_store)
out = CollectingOutputChannel()
processor.handle_message(UserMessage("_greet[name=Core]", out))
assert ("default", "hey there Core!") == out.latest_output()
| [
"rasa_core.channels.UserMessage",
"rasa_core.channels.direct.CollectingOutputChannel",
"rasa_core.tracker_store.InMemoryTrackerStore",
"rasa_core.processor.MessageProcessor",
"rasa_core.policies.scoring_policy.ScoringPolicy",
"rasa_core.featurizers.BinaryFeaturizer",
"rasa_core.interpreter.RegexInterpre... | [((881, 899), 'rasa_core.interpreter.RegexInterpreter', 'RegexInterpreter', ([], {}), '()\n', (897, 899), False, 'from rasa_core.interpreter import RegexInterpreter\n'), ((1048, 1084), 'rasa_core.tracker_store.InMemoryTrackerStore', 'InMemoryTrackerStore', (['default_domain'], {}), '(default_domain)\n', (1068, 1084), False, 'from rasa_core.tracker_store import InMemoryTrackerStore\n'), ((1101, 1171), 'rasa_core.processor.MessageProcessor', 'MessageProcessor', (['interpreter', 'ensemble', 'default_domain', 'tracker_store'], {}), '(interpreter, ensemble, default_domain, tracker_store)\n', (1117, 1171), False, 'from rasa_core.processor import MessageProcessor\n'), ((1282, 1307), 'rasa_core.channels.direct.CollectingOutputChannel', 'CollectingOutputChannel', ([], {}), '()\n', (1305, 1307), False, 'from rasa_core.channels.direct import CollectingOutputChannel\n'), ((1337, 1374), 'rasa_core.channels.UserMessage', 'UserMessage', (['"""_greet[name=Core]"""', 'out'], {}), "('_greet[name=Core]', out)\n", (1348, 1374), False, 'from rasa_core.channels import UserMessage\n'), ((845, 860), 'rasa_core.policies.scoring_policy.ScoringPolicy', 'ScoringPolicy', ([], {}), '()\n', (858, 860), False, 'from rasa_core.policies.scoring_policy import ScoringPolicy\n'), ((945, 963), 'rasa_core.featurizers.BinaryFeaturizer', 'BinaryFeaturizer', ([], {}), '()\n', (961, 963), False, 'from rasa_core.featurizers import BinaryFeaturizer\n')] |
import uuid
from typing import Dict, List, Text, Union
import pandas as pd
import torch
from datasets import load_metric
from pytorch_lightning.metrics import Metric
from seqeval.metrics import classification_report
# MIT License
#
# Copyright (c) 2021 Université Paris-Saclay
# Copyright (c) 2021 Laboratoire national de métrologie et d'essais (LNE)
# Copyright (c) 2021 CNRS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from dataset import LabelEncoding
UNIQUE_RUN_ID = str(uuid.uuid4())
def cat_labels(old: List[List[Text]], new: List[List[Text]]) -> List[List[Text]]:
"""
Custom concatenation of lists to keep the
state of the metric as lists of lists.
"""
old.extend(new)
return old
class SlotF1(Metric):
"""
A PyTorch Lightning metric to calculate slot filling F1 score using the seqeval script.
The seqeval script is used via the Huggingface metrics interface.
"""
def __init__(
self,
label_encoding: LabelEncoding,
ignore_index: int,
dist_sync_on_step=False,
name_or_path: str = 'seqeval',
compute_report: bool = False
):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.encoding = label_encoding
self.ignore_index = ignore_index
self.seqeval = load_metric(name_or_path, experiment_id=UNIQUE_RUN_ID)
self.compute_report = compute_report
self.add_state("predictions", default=[], dist_reduce_fx=cat_labels)
self.add_state("targets", default=[], dist_reduce_fx=cat_labels)
def update(self, predictions: torch.Tensor, targets: torch.Tensor):
"""
Update internal state with a new batch of predictions and targets.
This function is called automatically by PyTorch Lightning.
:param predictions: Tensor, shape (batch_size, seq_len, num_slot_labels)
Model predictions per token as (log) softmax scores.
:param targets: Tensor, shape (batch_size, seq_len)
Slot filling ground truth per token encoded as integers.
"""
# Get hard predictions
predictions = torch.argmax(predictions, dim=-1)
# Transform to list since it needs to deal with different sequence lengths
predictions = predictions.tolist()
targets = targets.tolist()
# Remove ignored predictions (special tokens and possibly subtokens)
true_predictions = [
[self.encoding.get_slot_label_name(p) for (p, l) in zip(pred, label) if l != self.ignore_index]
for pred, label in zip(predictions, targets)
]
true_targets = [
[self.encoding.get_slot_label_name(l) for (p, l) in zip(pred, label) if l != self.ignore_index]
for pred, label in zip(predictions, targets)
]
# Add predictions and labels to current state
self.predictions += true_predictions
self.targets += true_targets
def compute(self) -> Union[torch.Tensor, Dict]:
"""
Compute the Slot F1 score using the current state.
"""
results = self.seqeval.compute(predictions=self.predictions, references=self.targets)
# overall_precision, overall_recall and overall_accuracy are also available
f1 = torch.tensor(results["overall_f1"])
if self.compute_report:
report = classification_report(
y_true=self.targets, y_pred=self.predictions, output_dict=True
)
return {"f1": f1, "report": pd.DataFrame(report).transpose()}
else:
return f1
| [
"seqeval.metrics.classification_report",
"datasets.load_metric",
"uuid.uuid4",
"torch.tensor",
"pandas.DataFrame",
"torch.argmax"
] | [((1492, 1504), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1502, 1504), False, 'import uuid\n'), ((2336, 2390), 'datasets.load_metric', 'load_metric', (['name_or_path'], {'experiment_id': 'UNIQUE_RUN_ID'}), '(name_or_path, experiment_id=UNIQUE_RUN_ID)\n', (2347, 2390), False, 'from datasets import load_metric\n'), ((3155, 3188), 'torch.argmax', 'torch.argmax', (['predictions'], {'dim': '(-1)'}), '(predictions, dim=-1)\n', (3167, 3188), False, 'import torch\n'), ((4294, 4329), 'torch.tensor', 'torch.tensor', (["results['overall_f1']"], {}), "(results['overall_f1'])\n", (4306, 4329), False, 'import torch\n'), ((4383, 4472), 'seqeval.metrics.classification_report', 'classification_report', ([], {'y_true': 'self.targets', 'y_pred': 'self.predictions', 'output_dict': '(True)'}), '(y_true=self.targets, y_pred=self.predictions,\n output_dict=True)\n', (4404, 4472), False, 'from seqeval.metrics import classification_report\n'), ((4539, 4559), 'pandas.DataFrame', 'pd.DataFrame', (['report'], {}), '(report)\n', (4551, 4559), True, 'import pandas as pd\n')] |
# Project Euler - Problem 4
# Find the largest palindrome made from the product of two 3-digit numbers.
import time
start = time.time()
def pal(s):
i = 0
j = len(s) - 1
while i < j:
if s[i] != s[j]:
return 0
i += 1
j -= 1
return 1
n1 = 100
n2 = 1000 # exclusive
mx = 0
for i in range(n1, n2):
for j in range(n1, n2):
if pal(str(i * j)) and mx < i * j:
mx = i * j
print(mx)
print(time.time() - start, "sec")
| [
"time.time"
] | [((125, 136), 'time.time', 'time.time', ([], {}), '()\n', (134, 136), False, 'import time\n'), ((402, 413), 'time.time', 'time.time', ([], {}), '()\n', (411, 413), False, 'import time\n')] |
#!/usr/bin/env python
###########################################################################
# Active Inference algorithm
#
# Execute the AI algorithm using the data from the
# /filter/y_coloured_noise topic and publish the results to the
# /filter/ai/output topic.
# Note that only the filtering part of the AI algorithm is implemented yet.
#
# Author: <NAME>, TU Delft
# Last modified: 17.11.2019
#
###########################################################################
#Import all necessary packages
import rospy #needed to be able to program in Python
import numpy as np #needed to be able to work with numpy
import time #needed to be able to get the execution time of code parts
from scipy.linalg import toeplitz #needed to create derivative matrix in general way
from scipy.linalg import block_diag #needed to create the block-diagonal PI matrix
from jackal_active_inference_versus_kalman_filter.msg import gazebo_model_states_noise #needed to read the custom output messages gazebo_model_states_noise
from jackal_active_inference_versus_kalman_filter.msg import filt_output #needed to publish the custom output messages filt_output resulting from the filtering methods
#TODO:
#-finish the implementation with a correct usage of the learning rate, precision matrices and prior
#-implement the update rule for the next control input
#-extend the algorithm to work on all system model states
#-use IMU data in case of experiment with Jackal robot
#Active Inference class
#-------------------------------------------------------------------
class AI(object):
"""Class providing all AI functionality:
- initialization of all necessary matrices
- compute belief mu
- compute control action u"""
def __init__(self, n_states, n_inputs, n_outputs, p, x_ref):
super(AI, self).__init__()
#Input processing
self.p = p
#Indicating the first time AI function is called
self.first_time = True
#System dimensions
self.n_states = n_states
self.n_inputs = n_inputs
self.n_outputs = n_outputs
#Initial states
self.x_0 = np.matrix(np.zeros(shape = (self.n_states, 1)))
self.mu_0 = np.matrix(np.zeros(shape = ((1 + self.p) * self.n_states, 1)))
self.mu = self.mu_0
self.mu_dot = np.matrix(np.zeros(shape = ((1 + self.p) * self.n_states, 1)))
#Initial system input (u) and output (z)
self.u = np.matrix(np.zeros(shape = (self.n_inputs, 1)))
self.z = np.matrix(np.zeros(shape = (self.n_outputs, 1)))
#Derivative matrix
self.Der = np.kron(np.eye((1 + self.p), k = 1), np.matrix(np.eye(self.n_states)))
#Learning rates #TODO: tune these values when correct usage of precision matrices is known
self.alpha_mu = 3.408*10**(-6)
# self.alpha_u = 0.01
#System matrices
self.A = -209.6785884514270
self.A_tilde = np.kron(np.eye(1 + self.p), self.A)
self.B = np.matrix('16.921645797507500 -16.921645797507500')
self.C = 1
self.C_tilde = np.kron(np.matrix(np.eye(1 + self.p)), self.C)
#Initial reference path (needed for prior belief): assuming no prior belief should be given
self.x_ref = x_ref
temp = np.matrix(np.zeros(shape = ((1 + self.p), 1)))
temp[0] = 1
self.mu_ref = np.kron(temp, self.x_ref) #this assumes that reference acceleration of the robot will always be zero (the reference velocity constant)!
self.xi = self.Der * self.mu_ref - self.A_tilde * self.mu_ref
#Forward model #TODO: is this one always correct to use or should it actually be combined with alpha_u for update rule of u?
# self.G = -1 * self.C * (1 / self.A) * self.B
def construct_precision_matrices(self, sigma_w, s_w, sigma_z, s_z):
'''Using the standard deviation information of the process output noise signals, construct the precision matrices'''
#Process noise precision matrix
self.sigma_w = sigma_w
self.s_w = s_w
self.SIGMA_w = np.matrix(np.eye(self.n_states)) * self.sigma_w**2
self.PI_w = self.generate_PI(1 + self.p, self.SIGMA_w, self.s_w)
#Output noise precision matrix
self.sigma_z = sigma_z
self.s_z = s_z
self.SIGMA_z = np.matrix(np.eye(self.n_states)) * self.sigma_z**2
self.PI_z = self.generate_PI(1 + self.p, self.SIGMA_z, self.s_z)
#Total precision matrix
self.PI = block_diag(self.PI_w, self.PI_z)
def generate_PI(self, k, SIGMA, s):
if np.amax(SIGMA) == 0:
print("PI cannot be generated if sigma is 0 or negative")
n = SIGMA.shape[0]
if s != 0:
l = np.array(range(0, 2*k-1, 2))
rho = np.matrix(np.zeros(shape = (1, 2*k-1)))
rho[0,l] = np.cumprod(1-l)/(np.sqrt(2)*s)**l
V = np.matrix(np.zeros(shape = (k, k)))
for r in range(k):
V[r,:] = rho[0,r:r+k]
rho = -rho
SIGMA_tilde = np.kron(V, SIGMA)
PI = np.linalg.inv(SIGMA_tilde)
else:
PI = np.matrix(np.zeros(shape = (k*n, k*n)))
PI[0:n, 0:n] = np.linalg.inv(SIGMA)
return PI
def compute_mu(self):
'''Update belief mu'''
self.mu_dot = self.Der * self.mu - self.alpha_mu * ((self.Der - self.A_tilde).getT() * self.PI_w * (self.Der * self.mu - self.A_tilde * self.mu - self.xi) - self.C_tilde.getT() * self.PI_z * (self.z_gen - self.C_tilde * self.mu))
# self.mu_dot = self.Der * self.mu - self.alpha_mu * ((self.Der - self.A_tilde).getT() * self.PI_w * (self.Der * self.mu - self.A_tilde * self.mu - self.xi) - self.C_tilde.getT() * self.PI_z * (self.z - self.C_tilde * self.mu))
self.mu = self.mu + self.mu_dot * self.delta_t
def compute_u(self):
'''Update control action u'''
# self.u_dot = -1 * self.alpha_u * self.G.getT() * self.PI_z * (self.z - self.C_tilde * self.mu)
# self.u = self.u + self.u_dot * self.delta_t
def debug(self):
'''Debug function for AI functionality: print all kinds of desirable variables'''
print("Der:\n{}\n\nmu:\n{}\n\nmu_dot:\n{}\n\nA_tilde:\n{}\n\nPI_w:\n{}\n\nxi:\n{}\n\nC_tilde:\n{}\n\nPI_z:\n{}\n\n-------------------------------------------------------------------------------------------\n".format(self.Der, self.mu, self.mu_dot, self.A_tilde, self.PI_w, self.xi, self.C_tilde, self.PI_z))
print("Der*mu:\n{}\n\n2nd term:\n{}\n\n3rd term:\n{}\n\nmu_dot:\n{}\n\nmu:\n{}\n\n-------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------\n".format(self.Der*self.mu, self.alpha_mu * ((self.Der - self.A_tilde).getT() * self.PI_w * (self.Der * self.mu - self.A_tilde * self.mu - self.xi)), self.alpha_mu * (self.C_tilde.getT() * self.PI_z * (self.z - self.C_tilde * self.mu)), self.mu_dot, self.mu))
print("C_tildeT:\n{}\n\nPI_z:\n{}\n\nC_tildeT*PI_z:\n{}\n\nz:\n{}\n\nC_tilde:\n{}\n\nC_tilde*mu:\n{}\n\nz-C_tilde*mu:\n{}\n\n-------------------------------------------------------------------------------------------\n".format(self.C_tilde.getT(), self.PI_z, self.C_tilde.getT()*self.PI_z, self.z, self.C_tilde, self.C_tilde*self.mu, self.z-self.C_tilde*self.mu))
print("C_tildeT*PI_z:\n{}\n\nz:\n{}\n\nC_tilde*mu:\n{}\n\nz-C_tilde*mu:\n{}\n\n3rd term:\n{}\n\n-------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------\n".format(self.C_tilde.getT()*self.PI_z, z, self.C_tilde*self.mu, z-self.C_tilde*self.mu, self.C_tilde.getT() * self.PI_z * (z - self.C_tilde * self.mu)))
#-------------------------------------------------------------------
# ROS node wrapper
#-------------------------------------------------------------------
class Subscriber(object):
    """ROS glue around the AI filter.

    Subscribes to the noisy measurement topic, runs each sample through
    the active-inference equations and publishes the filtered result.
    """
    def __init__(self):
        super(Subscriber, self).__init__()
        # Operating-point offsets used to shift measurements to the origin.
        self.mean_u = np.matrix([[4.183917321479406], [1.942289357961973]])
        self.mean_y = 0.401988453296692
        self.debug = False
        # Model dimensions and reference state for the AI engine.
        self.n_states = 1
        self.p = 6
        self.x_ref = np.matrix(np.zeros(shape=(self.n_states, 1)))
        # The active-inference engine itself.
        self.ai = AI(n_states=self.n_states, n_inputs=1, n_outputs=1, p=self.p, x_ref=self.x_ref)
        # ROS plumbing: custom output message, node, publisher, subscriber.
        self.msg = filt_output()
        rospy.init_node('ai', anonymous=True)
        self.publisher = rospy.Publisher('filter/ai/output', filt_output, queue_size=1)
        rospy.Subscriber('filter/y_coloured_noise', gazebo_model_states_noise, self.callback)
        rospy.spin()  # blocks here; all further work happens in callback()

    def callback(self, data):
        """Run one filter update for an incoming measurement and publish it."""
        # On the first sample the update period becomes known, so the
        # precision matrices can finally be constructed.
        if self.ai.first_time:
            self.ai.delta_t = data.delta_t
            self.ai.construct_precision_matrices(data.sigma_w, data.s_w, data.sigma_z, data.s_z)
            self.ai.first_time = False
        # Shift the system output to the origin of the operating point.
        self.z = data.y_model_noise[2]
        self.ai.z = self.z - self.mean_y
        # Generalised output: only the zeroth order carries the measurement.
        selector = np.matrix(np.zeros(shape=(1 + self.p, 1)))
        selector[0, 0] = 1
        self.ai.z_gen = np.kron(selector, self.ai.z)
        # Run the active-inference updates (optionally dumping state first).
        if self.debug:
            self.ai.debug()
        self.ai.compute_mu()
        self.ai.compute_u()
        self.x_filt = self.ai.mu[:self.n_states, 0] + 1 / self.ai.C * self.mean_y
        # Publish the filtered state plus raw/linearised measurements.
        # (Control output u is currently disabled in AI.compute_u.)
        self.msg.x_filt = [float(self.x_filt)]
        self.msg.y = [float(self.z)]
        self.msg.y_lin = [float(self.ai.z)]
        self.publisher.publish(self.msg)
#-------------------------------------------------------------------
#Main function
#-------------------------------------------------------------------
if __name__ == '__main__':
    # Constructing the Subscriber initializes the ROS node and then blocks
    # inside rospy.spin() until the node is shut down.
    subscriber = Subscriber()
| [
"numpy.eye",
"numpy.sqrt",
"rospy.Subscriber",
"rospy.init_node",
"numpy.kron",
"numpy.zeros",
"numpy.linalg.inv",
"rospy.spin",
"scipy.linalg.block_diag",
"jackal_active_inference_versus_kalman_filter.msg.filt_output",
"numpy.matrix",
"rospy.Publisher",
"numpy.amax",
"numpy.cumprod"
] | [((3139, 3190), 'numpy.matrix', 'np.matrix', (['"""16.921645797507500 -16.921645797507500"""'], {}), "('16.921645797507500 -16.921645797507500')\n", (3148, 3190), True, 'import numpy as np\n'), ((3520, 3545), 'numpy.kron', 'np.kron', (['temp', 'self.x_ref'], {}), '(temp, self.x_ref)\n', (3527, 3545), True, 'import numpy as np\n'), ((4689, 4721), 'scipy.linalg.block_diag', 'block_diag', (['self.PI_w', 'self.PI_z'], {}), '(self.PI_w, self.PI_z)\n', (4699, 4721), False, 'from scipy.linalg import block_diag\n'), ((8595, 8648), 'numpy.matrix', 'np.matrix', (['[[4.183917321479406], [1.942289357961973]]'], {}), '([[4.183917321479406], [1.942289357961973]])\n', (8604, 8648), True, 'import numpy as np\n'), ((9064, 9077), 'jackal_active_inference_versus_kalman_filter.msg.filt_output', 'filt_output', ([], {}), '()\n', (9075, 9077), False, 'from jackal_active_inference_versus_kalman_filter.msg import filt_output\n'), ((9129, 9166), 'rospy.init_node', 'rospy.init_node', (['"""ai"""'], {'anonymous': '(True)'}), "('ai', anonymous=True)\n", (9144, 9166), False, 'import rospy\n'), ((9192, 9254), 'rospy.Publisher', 'rospy.Publisher', (['"""filter/ai/output"""', 'filt_output'], {'queue_size': '(1)'}), "('filter/ai/output', filt_output, queue_size=1)\n", (9207, 9254), False, 'import rospy\n'), ((9263, 9353), 'rospy.Subscriber', 'rospy.Subscriber', (['"""filter/y_coloured_noise"""', 'gazebo_model_states_noise', 'self.callback'], {}), "('filter/y_coloured_noise', gazebo_model_states_noise, self\n .callback)\n", (9279, 9353), False, 'import rospy\n'), ((9357, 9369), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (9367, 9369), False, 'import rospy\n'), ((10203, 10227), 'numpy.kron', 'np.kron', (['temp', 'self.ai.z'], {}), '(temp, self.ai.z)\n', (10210, 10227), True, 'import numpy as np\n'), ((2268, 2302), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.n_states, 1)'}), '(shape=(self.n_states, 1))\n', (2276, 2302), True, 'import numpy as np\n'), ((2336, 2385), 'numpy.zeros', 
'np.zeros', ([], {'shape': '((1 + self.p) * self.n_states, 1)'}), '(shape=((1 + self.p) * self.n_states, 1))\n', (2344, 2385), True, 'import numpy as np\n'), ((2449, 2498), 'numpy.zeros', 'np.zeros', ([], {'shape': '((1 + self.p) * self.n_states, 1)'}), '(shape=((1 + self.p) * self.n_states, 1))\n', (2457, 2498), True, 'import numpy as np\n'), ((2587, 2621), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.n_inputs, 1)'}), '(shape=(self.n_inputs, 1))\n', (2595, 2621), True, 'import numpy as np\n'), ((2652, 2687), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.n_outputs, 1)'}), '(shape=(self.n_outputs, 1))\n', (2660, 2687), True, 'import numpy as np\n'), ((2754, 2777), 'numpy.eye', 'np.eye', (['(1 + self.p)'], {'k': '(1)'}), '(1 + self.p, k=1)\n', (2760, 2777), True, 'import numpy as np\n'), ((3094, 3112), 'numpy.eye', 'np.eye', (['(1 + self.p)'], {}), '(1 + self.p)\n', (3100, 3112), True, 'import numpy as np\n'), ((3441, 3472), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1 + self.p, 1)'}), '(shape=(1 + self.p, 1))\n', (3449, 3472), True, 'import numpy as np\n'), ((4775, 4789), 'numpy.amax', 'np.amax', (['SIGMA'], {}), '(SIGMA)\n', (4782, 4789), True, 'import numpy as np\n'), ((5294, 5311), 'numpy.kron', 'np.kron', (['V', 'SIGMA'], {}), '(V, SIGMA)\n', (5301, 5311), True, 'import numpy as np\n'), ((5329, 5355), 'numpy.linalg.inv', 'np.linalg.inv', (['SIGMA_tilde'], {}), '(SIGMA_tilde)\n', (5342, 5355), True, 'import numpy as np\n'), ((5467, 5487), 'numpy.linalg.inv', 'np.linalg.inv', (['SIGMA'], {}), '(SIGMA)\n', (5480, 5487), True, 'import numpy as np\n'), ((8792, 8826), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.n_states, 1)'}), '(shape=(self.n_states, 1))\n', (8800, 8826), True, 'import numpy as np\n'), ((10122, 10153), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1 + self.p, 1)'}), '(shape=(1 + self.p, 1))\n', (10130, 10153), True, 'import numpy as np\n'), ((2793, 2814), 'numpy.eye', 'np.eye', (['self.n_states'], {}), '(self.n_states)\n', (2799, 2814), 
True, 'import numpy as np\n'), ((3251, 3269), 'numpy.eye', 'np.eye', (['(1 + self.p)'], {}), '(1 + self.p)\n', (3257, 3269), True, 'import numpy as np\n'), ((4267, 4288), 'numpy.eye', 'np.eye', (['self.n_states'], {}), '(self.n_states)\n', (4273, 4288), True, 'import numpy as np\n'), ((4516, 4537), 'numpy.eye', 'np.eye', (['self.n_states'], {}), '(self.n_states)\n', (4522, 4537), True, 'import numpy as np\n'), ((5007, 5037), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 2 * k - 1)'}), '(shape=(1, 2 * k - 1))\n', (5015, 5037), True, 'import numpy as np\n'), ((5060, 5077), 'numpy.cumprod', 'np.cumprod', (['(1 - l)'], {}), '(1 - l)\n', (5070, 5077), True, 'import numpy as np\n'), ((5133, 5155), 'numpy.zeros', 'np.zeros', ([], {'shape': '(k, k)'}), '(shape=(k, k))\n', (5141, 5155), True, 'import numpy as np\n'), ((5410, 5440), 'numpy.zeros', 'np.zeros', ([], {'shape': '(k * n, k * n)'}), '(shape=(k * n, k * n))\n', (5418, 5440), True, 'import numpy as np\n'), ((5077, 5087), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5084, 5087), True, 'import numpy as np\n')] |
import os, sys
import unittest
import subprocess
import shutil
import logging
import io
import stat
from pathlib import Path as _Path
from multiprocessing import freeze_support
from sys import platform as _platform
import json
from clang_build import cli
from clang_build import toolchain
from clang_build.errors import CompileError
from clang_build.errors import LinkError
from clang_build.logging_tools import TqdmHandler as TqdmHandler
def on_rm_error(func, path, exc_info):
    """``shutil.rmtree`` onerror callback.

    *path* is the file that could not be removed; assume it is read-only,
    make it writable and unlink it.  *func* and *exc_info* follow the
    ``shutil.rmtree`` onerror signature and are only used for reporting.

    Fix: the original used a bare ``except:``, which also swallowed
    KeyboardInterrupt/SystemExit and programming errors; only filesystem
    failures (OSError) are now caught.
    """
    try:
        os.chmod(path, stat.S_IWRITE)
        os.unlink(path)
    except OSError:
        print(f'Error trying to clean up file "{path}":\n{exc_info}')
def clang_build_try_except(args):
    """Run clang-build with *args*, reporting build failures via the logger.

    CompileError and LinkError are caught here and their per-target error
    lists are logged instead of being allowed to propagate.
    """
    try:
        cli.build(cli.parse_args(args))
    except CompileError as compile_error:
        log = logging.getLogger('clang_build')
        log.error('Compilation was unsuccessful:')
        for target, errors in compile_error.error_dict.items():
            log.error(f"Target [{target}] did not compile. Errors:\n{' '.join(errors)}")
    except LinkError as link_error:
        log = logging.getLogger('clang_build')
        log.error('Linking was unsuccessful:')
        for target, errors in link_error.error_dict.items():
            log.error(f'Target [{target}] did not link. Errors:\n{errors}')
class TestClangBuild(unittest.TestCase):
    """End-to-end tests for clang-build.

    Each test builds one of the example projects under ``test/`` and then
    executes the produced binary, checking its output.  The common
    "run binary, fail with its captured output" pattern is factored into
    :meth:`_run`.
    """

    def _run(self, command, failure='Could not run compiled program'):
        """Run *command* and return its stripped stdout.

        Fails the current test with *failure* plus the captured output if
        the command exits non-zero.
        """
        try:
            return subprocess.check_output(command, stderr=subprocess.STDOUT).decode('utf-8').strip()
        except subprocess.CalledProcessError as e:
            self.fail(f'{failure}. Message:\n{e.output}')

    def test_hello_world_mwe(self):
        clang_build_try_except(['-d', 'test/mwe'])
        output = self._run(['./build/default/bin/main'])
        self.assertEqual(output, 'Hello!')
        # The build must also have emitted a compile_commands.json whose
        # entries cover hello.cpp and its object/dependency outputs.
        compile_commands_file = _Path("build") / "compile_commands.json"
        self.assertTrue(compile_commands_file.exists())
        compile_commands_str = compile_commands_file.read_text()
        logger = logging.getLogger('clang_build')
        logger.info(compile_commands_str)
        compile_commands = json.loads(compile_commands_str)
        for command in compile_commands:
            self.assertEqual(str(_Path('test/mwe/hello.cpp').resolve()), str(_Path(command["file"]).resolve()))
            self.assertTrue(
                str(_Path('./build/default/obj/hello.o').resolve()) == str(_Path(command["output"]).resolve()) or
                str(_Path('./build/default/dep/hello.d').resolve()) == str(_Path(command["output"]).resolve())
            )

    def test_build_types(self):
        for build_type in ['release', 'relwithdebinfo', 'debug', 'coverage']:
            clang_build_try_except(['-d', 'test/mwe', '-b', build_type])
            output = self._run(
                [f'./build/{build_type}/bin/main'],
                failure=f'Could not run compiled program with build type "{build_type}"')
            self.assertEqual(output, 'Hello!')

    def test_compile_error(self):
        with self.assertRaises(CompileError):
            cli.build(cli.parse_args(['-d', 'test/build_errors/compile_error', '-V']))

    def test_link_error(self):
        with self.assertRaises(LinkError):
            cli.build(cli.parse_args(['-d', 'test/build_errors/link_error', '-V']))

    def test_script_call(self):
        # Exercise the installed console script rather than the Python API.
        try:
            subprocess.check_output(['clang-build', '-d', 'test/mwe', '-V'], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError:
            self.fail('Compilation failed')
        output = self._run(['./build/default/bin/main'])
        self.assertEqual(output, 'Hello!')

    def test_hello_world_rebuild(self):
        clang_build_try_except(['-d', 'test/mwe', '-V'])
        output = self._run(['./build/default/bin/main'])
        self.assertEqual(output, 'Hello!')
        ### TODO: the following does not seem to work under coverage runs...
        # logger = logging.getLogger('clang_build')
        # stream_capture = io.StringIO()
        # ch = logging.StreamHandler(stream_capture)
        # ch.setLevel(logging.DEBUG)
        # logger.addHandler(ch)
        # clang_build_try_except(['-d', 'test/mwe', '-V'])
        # logger.removeHandler(ch)
        # self.assertRegex(stream_capture.getvalue(), r'.*\[main\]: target is already compiled*')
        # stream_capture = io.StringIO()
        # ch = logging.StreamHandler(stream_capture)
        # ch.setLevel(logging.DEBUG)
        # logger.addHandler(ch)
        # clang_build_try_except(['-d', 'test/mwe', '-V', '-f'])
        # logger.removeHandler(ch)
        # self.assertRegex(stream_capture.getvalue(), r'.*\[main\]: target needs to build sources*')

    def test_automatic_include_folders(self):
        clang_build_try_except(['-d', 'test/mwe_with_default_folders', '-V'])
        output = self._run(['./build/default/bin/main'])
        self.assertEqual(output, 'Calculated Magic: 30')

    def test_toml_mwe(self):
        clang_build_try_except(['-d', 'test/toml_mwe'])
        output = self._run(['./build/default/bin/runHello'])
        self.assertEqual(output, 'Hello!')

    def test_toml_custom_folder(self):
        clang_build_try_except(['-d', 'test/toml_with_custom_folder'])
        output = self._run(['./build/default/bin/runHello'])
        self.assertEqual(output, 'Hello!')

    def test_pyapi_directory(self):
        clang_build_try_except(['-d', 'test/py-api/directory', '-V'])
        output = self._run(['./build/default/bin/main'])
        self.assertEqual(output, 'the version is 1.2.0')

    def test_subproject(self):
        clang_build_try_except(['-d', 'test/subproject', '-V'])
        output = self._run(['./build/myexe/default/bin/runLib'])
        self.assertEqual(output, 'Hello! mylib::triple(3) returned 9')

    def test_public_dependency(self):
        clang_build_try_except(['-d', 'test/public_dependency', '-V'])
        output = self._run(['./build/myexe/default/bin/runLib'])
        self.assertEqual(output, 'Hello! libC::half(libA::triple(4)) returned 6')

    def test_pyapi_subproject(self):
        clang_build_try_except(['-d', 'test/py-api/subproject', '-V'])
        output = self._run(['./build/myexe/default/bin/runLib'])
        self.assertEqual(output, 'Hello! mylib::triple(3) returned 9')

    def test_boost_filesystem(self):
        clang_build_try_except(['-d', 'test/boost-filesystem', '-V'])
        output = self._run(['./build/myexe/default/bin/myexe', 'build'])
        self.assertEqual(output, '"build" is a directory')

    def test_c_library(self):
        clang_build_try_except(['-d', 'test/c-library', '-V'])
        output = self._run(['./build/myexe/default/bin/myexe'])
        self.assertEqual(output, '3 2 0'+os.linesep+'3 1 0')

    def test_build_all(self):
        clang_build_try_except(['-d', 'test/c-library', '-V', '-a'])
        # With '-a' even targets nobody depends on must have been built.
        try:
            output = subprocess.check_output(['./build/qhull/qhull-executable/default/bin/qhull', '-V'], stderr=subprocess.STDOUT).decode('utf-8').strip()
        except subprocess.CalledProcessError:
            self.fail('Could not run a target which should have been built')
        self.assertEqual(output, 'qhull_r 7.2.0 (2015.2.r 2016/01/18)')

    def test_platform_flags(self):
        clang_build_try_except(['-d', 'test/platform_flags', '-V', '--debug'])
        output = self._run(['./build/default/bin/myexe'])
        if _platform == 'linux':
            self.assertEqual(output, 'Hello Linux!')
        elif _platform == 'darwin':
            self.assertEqual(output, 'Hello OSX!')
        elif _platform == 'win32':
            self.assertEqual(output, 'Hello Windows!')
        else:
            raise RuntimeError('Tried to run test_platform_flags on unsupported platform ' + _platform)

    def test_openmp(self):
        clang_build_try_except(['-d', 'test/openmp', '-V'])
        output = self._run(['./build/default/bin/runHello'])
        self.assertRegex(output, r'Hello from thread 1, nthreads*')

    def test_mwe_two_targets(self):
        clang_build_try_except(['-d', 'test/multi_target_external', '-V', '--bundle'])
        output = self._run(['./build/myexe/default/bin/runLib'])
        self.assertEqual(output, 'Hello! mylib::calculate() returned 2')

    def test_pybind11(self):
        clang_build_try_except(['-d', 'test/pybind11', '-V'])
        pylib_dir = os.path.abspath(os.path.join("build", "pylib", "default", toolchain.LLVM.PLATFORM_DEFAULTS[_platform]['SHARED_LIBRARY_OUTPUT_DIR']))
        sys.path.insert(0, pylib_dir)
        try:
            import pylib
            self.assertEqual(pylib.triple(3), 9)
        except ImportError:
            # Help debugging by showing what was actually produced.
            if os.path.exists(pylib_dir):
                print(f'Expected location "{pylib_dir}" contains: {os.listdir(pylib_dir)}')
            else:
                print(f'Expected location "{pylib_dir}" does not exist!')
            self.fail('Import of pylib failed!')

    def setUp(self):
        """Route clang_build log output through a fresh tqdm-aware handler."""
        logger = logging.getLogger('clang_build')
        logger.setLevel(logging.INFO)
        ch = TqdmHandler()
        formatter = logging.Formatter('%(message)s')
        ch.setLevel(logging.INFO)
        ch.setFormatter(formatter)
        logger.handlers = []
        logger.addHandler(ch)

    def tearDown(self):
        """Remove the build tree of the finished test, tolerating read-only files."""
        if _Path('build').exists():
            shutil.rmtree('build', onerror = on_rm_error)
if __name__ == '__main__':
    # freeze_support() is needed for multiprocessing on Windows when the
    # program is packaged into a frozen executable; it is a no-op otherwise.
    freeze_support()
unittest.main() | [
"logging.getLogger",
"subprocess.check_output",
"json.loads",
"sys.path.insert",
"clang_build.cli.parse_args",
"os.path.exists",
"pylib.triple",
"pathlib.Path",
"os.listdir",
"logging.Formatter",
"os.path.join",
"os.chmod",
"clang_build.logging_tools.TqdmHandler",
"multiprocessing.freeze_s... | [((12753, 12769), 'multiprocessing.freeze_support', 'freeze_support', ([], {}), '()\n', (12767, 12769), False, 'from multiprocessing import freeze_support\n'), ((12774, 12789), 'unittest.main', 'unittest.main', ([], {}), '()\n', (12787, 12789), False, 'import unittest\n'), ((631, 660), 'os.chmod', 'os.chmod', (['path', 'stat.S_IWRITE'], {}), '(path, stat.S_IWRITE)\n', (639, 660), False, 'import os, sys\n'), ((671, 686), 'os.unlink', 'os.unlink', (['path'], {}), '(path)\n', (680, 686), False, 'import os, sys\n'), ((2207, 2239), 'logging.getLogger', 'logging.getLogger', (['"""clang_build"""'], {}), "('clang_build')\n", (2224, 2239), False, 'import logging\n'), ((2309, 2341), 'json.loads', 'json.loads', (['compile_commands_str'], {}), '(compile_commands_str)\n', (2319, 2341), False, 'import json\n'), ((11833, 11862), 'sys.path.insert', 'sys.path.insert', (['(0)', 'pylib_dir'], {}), '(0, pylib_dir)\n', (11848, 11862), False, 'import os, sys\n'), ((12322, 12354), 'logging.getLogger', 'logging.getLogger', (['"""clang_build"""'], {}), "('clang_build')\n", (12339, 12354), False, 'import logging\n'), ((12406, 12419), 'clang_build.logging_tools.TqdmHandler', 'TqdmHandler', ([], {}), '()\n', (12417, 12419), True, 'from clang_build.logging_tools import TqdmHandler as TqdmHandler\n'), ((12440, 12472), 'logging.Formatter', 'logging.Formatter', (['"""%(message)s"""'], {}), "('%(message)s')\n", (12457, 12472), False, 'import logging\n'), ((835, 855), 'clang_build.cli.parse_args', 'cli.parse_args', (['args'], {}), '(args)\n', (849, 855), False, 'from clang_build import cli\n'), ((916, 948), 'logging.getLogger', 'logging.getLogger', (['"""clang_build"""'], {}), "('clang_build')\n", (933, 948), False, 'import logging\n'), ((1267, 1299), 'logging.getLogger', 'logging.getLogger', (['"""clang_build"""'], {}), "('clang_build')\n", (1284, 1299), False, 'import logging\n'), ((1997, 2011), 'pathlib.Path', '_Path', (['"""build"""'], {}), "('build')\n", (2002, 
2011), True, 'from pathlib import Path as _Path\n'), ((3702, 3797), 'subprocess.check_output', 'subprocess.check_output', (["['clang-build', '-d', 'test/mwe', '-V']"], {'stderr': 'subprocess.STDOUT'}), "(['clang-build', '-d', 'test/mwe', '-V'], stderr=\n subprocess.STDOUT)\n", (3725, 3797), False, 'import subprocess\n'), ((11708, 11828), 'os.path.join', 'os.path.join', (['"""build"""', '"""pylib"""', '"""default"""', "toolchain.LLVM.PLATFORM_DEFAULTS[_platform]['SHARED_LIBRARY_OUTPUT_DIR']"], {}), "('build', 'pylib', 'default', toolchain.LLVM.PLATFORM_DEFAULTS[\n _platform]['SHARED_LIBRARY_OUTPUT_DIR'])\n", (11720, 11828), False, 'import os, sys\n'), ((11923, 11938), 'pylib.triple', 'pylib.triple', (['(3)'], {}), '(3)\n', (11935, 11938), False, 'import pylib\n'), ((12674, 12717), 'shutil.rmtree', 'shutil.rmtree', (['"""build"""'], {'onerror': 'on_rm_error'}), "('build', onerror=on_rm_error)\n", (12687, 12717), False, 'import shutil\n'), ((3420, 3483), 'clang_build.cli.parse_args', 'cli.parse_args', (["['-d', 'test/build_errors/compile_error', '-V']"], {}), "(['-d', 'test/build_errors/compile_error', '-V'])\n", (3434, 3483), False, 'from clang_build import cli\n'), ((3582, 3642), 'clang_build.cli.parse_args', 'cli.parse_args', (["['-d', 'test/build_errors/link_error', '-V']"], {}), "(['-d', 'test/build_errors/link_error', '-V'])\n", (3596, 3642), False, 'from clang_build import cli\n'), ((12023, 12048), 'os.path.exists', 'os.path.exists', (['pylib_dir'], {}), '(pylib_dir)\n', (12037, 12048), False, 'import os, sys\n'), ((12637, 12651), 'pathlib.Path', '_Path', (['"""build"""'], {}), "('build')\n", (12642, 12651), True, 'from pathlib import Path as _Path\n'), ((1686, 1765), 'subprocess.check_output', 'subprocess.check_output', (["['./build/default/bin/main']"], {'stderr': 'subprocess.STDOUT'}), "(['./build/default/bin/main'], stderr=subprocess.STDOUT)\n", (1709, 1765), False, 'import subprocess\n'), ((2416, 2443), 'pathlib.Path', '_Path', 
(['"""test/mwe/hello.cpp"""'], {}), "('test/mwe/hello.cpp')\n", (2421, 2443), True, 'from pathlib import Path as _Path\n'), ((2460, 2482), 'pathlib.Path', '_Path', (["command['file']"], {}), "(command['file'])\n", (2465, 2482), True, 'from pathlib import Path as _Path\n'), ((3922, 4001), 'subprocess.check_output', 'subprocess.check_output', (["['./build/default/bin/main']"], {'stderr': 'subprocess.STDOUT'}), "(['./build/default/bin/main'], stderr=subprocess.STDOUT)\n", (3945, 4001), False, 'import subprocess\n'), ((4333, 4412), 'subprocess.check_output', 'subprocess.check_output', (["['./build/default/bin/main']"], {'stderr': 'subprocess.STDOUT'}), "(['./build/default/bin/main'], stderr=subprocess.STDOUT)\n", (4356, 4412), False, 'import subprocess\n'), ((5622, 5701), 'subprocess.check_output', 'subprocess.check_output', (["['./build/default/bin/main']"], {'stderr': 'subprocess.STDOUT'}), "(['./build/default/bin/main'], stderr=subprocess.STDOUT)\n", (5645, 5701), False, 'import subprocess\n'), ((6035, 6123), 'subprocess.check_output', 'subprocess.check_output', (["['./build/default/bin/runHello']"], {'stderr': 'subprocess.STDOUT'}), "(['./build/default/bin/runHello'], stderr=subprocess\n .STDOUT)\n", (6058, 6123), False, 'import subprocess\n'), ((6463, 6551), 'subprocess.check_output', 'subprocess.check_output', (["['./build/default/bin/runHello']"], {'stderr': 'subprocess.STDOUT'}), "(['./build/default/bin/runHello'], stderr=subprocess\n .STDOUT)\n", (6486, 6551), False, 'import subprocess\n'), ((6887, 6966), 'subprocess.check_output', 'subprocess.check_output', (["['./build/default/bin/main']"], {'stderr': 'subprocess.STDOUT'}), "(['./build/default/bin/main'], stderr=subprocess.STDOUT)\n", (6910, 6966), False, 'import subprocess\n'), ((7310, 7402), 'subprocess.check_output', 'subprocess.check_output', (["['./build/myexe/default/bin/runLib']"], {'stderr': 'subprocess.STDOUT'}), "(['./build/myexe/default/bin/runLib'], stderr=\n subprocess.STDOUT)\n", (7333, 7402), 
False, 'import subprocess\n'), ((7769, 7861), 'subprocess.check_output', 'subprocess.check_output', (["['./build/myexe/default/bin/runLib']"], {'stderr': 'subprocess.STDOUT'}), "(['./build/myexe/default/bin/runLib'], stderr=\n subprocess.STDOUT)\n", (7792, 7861), False, 'import subprocess\n'), ((8238, 8330), 'subprocess.check_output', 'subprocess.check_output', (["['./build/myexe/default/bin/runLib']"], {'stderr': 'subprocess.STDOUT'}), "(['./build/myexe/default/bin/runLib'], stderr=\n subprocess.STDOUT)\n", (8261, 8330), False, 'import subprocess\n'), ((8695, 8794), 'subprocess.check_output', 'subprocess.check_output', (["['./build/myexe/default/bin/myexe', 'build']"], {'stderr': 'subprocess.STDOUT'}), "(['./build/myexe/default/bin/myexe', 'build'],\n stderr=subprocess.STDOUT)\n", (8718, 8794), False, 'import subprocess\n'), ((9134, 9225), 'subprocess.check_output', 'subprocess.check_output', (["['./build/myexe/default/bin/myexe']"], {'stderr': 'subprocess.STDOUT'}), "(['./build/myexe/default/bin/myexe'], stderr=\n subprocess.STDOUT)\n", (9157, 9225), False, 'import subprocess\n'), ((9572, 9685), 'subprocess.check_output', 'subprocess.check_output', (["['./build/qhull/qhull-executable/default/bin/qhull', '-V']"], {'stderr': 'subprocess.STDOUT'}), "(['./build/qhull/qhull-executable/default/bin/qhull',\n '-V'], stderr=subprocess.STDOUT)\n", (9595, 9685), False, 'import subprocess\n'), ((10057, 10142), 'subprocess.check_output', 'subprocess.check_output', (["['./build/default/bin/myexe']"], {'stderr': 'subprocess.STDOUT'}), "(['./build/default/bin/myexe'], stderr=subprocess.STDOUT\n )\n", (10080, 10142), False, 'import subprocess\n'), ((10797, 10885), 'subprocess.check_output', 'subprocess.check_output', (["['./build/default/bin/runHello']"], {'stderr': 'subprocess.STDOUT'}), "(['./build/default/bin/runHello'], stderr=subprocess\n .STDOUT)\n", (10820, 10885), False, 'import subprocess\n'), ((11263, 11355), 'subprocess.check_output', 'subprocess.check_output', 
(["['./build/myexe/default/bin/runLib']"], {'stderr': 'subprocess.STDOUT'}), "(['./build/myexe/default/bin/runLib'], stderr=\n subprocess.STDOUT)\n", (11286, 11355), False, 'import subprocess\n'), ((2990, 3080), 'subprocess.check_output', 'subprocess.check_output', (["[f'./build/{build_type}/bin/main']"], {'stderr': 'subprocess.STDOUT'}), "([f'./build/{build_type}/bin/main'], stderr=\n subprocess.STDOUT)\n", (3013, 3080), False, 'import subprocess\n'), ((12117, 12138), 'os.listdir', 'os.listdir', (['pylib_dir'], {}), '(pylib_dir)\n', (12127, 12138), False, 'import os, sys\n'), ((2544, 2580), 'pathlib.Path', '_Path', (['"""./build/default/obj/hello.o"""'], {}), "('./build/default/obj/hello.o')\n", (2549, 2580), True, 'from pathlib import Path as _Path\n'), ((2599, 2623), 'pathlib.Path', '_Path', (["command['output']"], {}), "(command['output'])\n", (2604, 2623), True, 'from pathlib import Path as _Path\n'), ((2658, 2694), 'pathlib.Path', '_Path', (['"""./build/default/dep/hello.d"""'], {}), "('./build/default/dep/hello.d')\n", (2663, 2694), True, 'from pathlib import Path as _Path\n'), ((2713, 2737), 'pathlib.Path', '_Path', (["command['output']"], {}), "(command['output'])\n", (2718, 2737), True, 'from pathlib import Path as _Path\n')] |
from typing import Callable, Mapping, TypedDict
import asyncio, aiohttp, discord
class Options(TypedDict):
    """Configuration dict accepted by ``AutoPoster.__init__``.

    ``interval``: posting period; ``AutoPoster`` enforces a minimum of
        900000 (presumably milliseconds, i.e. 15 minutes — confirm against
        the listcord API docs).
    ``start``: when True posting is enabled immediately, when False the
        poster is created in the stopped state.
    """
    interval: int
    start: bool
class AutoPoster():
    """Periodically posts the bot's server count to the listcord.gg API.

    Event callbacks are registered with :meth:`on`; :meth:`init` emits
    ``'post'`` (successful post, receives the JSON reply) and ``'error'``
    (non-200 reply, receives the JSON error body).

    Fixes over the original:
      * ``self._events`` was unreachable dead code after a ``return``
        inside ``on_error``, so :meth:`emit` always raised AttributeError;
        the default handlers and the registry are now set up in
        ``__init__``.
      * ``__init__`` wrote defaults back into the caller's options dict
        (and into the shared default dict); it now works on a copy.
      * ``asyncio.sleep`` takes seconds, while the interval (minimum
        900000) is in milliseconds; the sleep now divides by 1000.
      * ``discord.Client`` annotations are quoted (forward references) so
        the class can be imported without evaluating them.
    """
    token: str
    interval: int
    bot: "discord.Client"
    stopped: bool
    _events: Mapping[str, Callable]

    def __init__(self, token: str, bot: "discord.Client", options: "Options" = {
        'interval': 900000,
        'start': True
    }):
        """Store credentials/bot and validate the posting options.

        Raises TypeError if ``interval`` is not an int or is below 900000.
        """
        self.token = token
        self.bot = bot
        # Work on a copy so neither the caller's dict nor the shared
        # default dict is ever mutated.
        opts = dict(options)
        interval = opts.get('interval', 900000)
        if not isinstance(interval, int) or interval < 900000:
            raise TypeError('Invalid interval duration!')
        self.interval = interval
        self.stopped = not opts.get('start', True)
        # Default no-op handlers so emit() is always safe to call.
        def on_post(data) -> None:
            return None
        def on_error(data) -> None:
            return None
        self._events = {
            'post': on_post,
            'error': on_error
        }

    def on(self, event: str) -> Callable[[Callable], None]:
        """Return a registrar that installs *callback* as the handler for *event*."""
        def add_listener(callback: Callable):
            self._events[event] = callback
        return add_listener

    def emit(self, event: str, data):
        """Invoke the handler registered for *event* with *data*, if any."""
        if event in self._events:
            self._events[event](data)

    async def init(self):
        """Post the guild count in a loop until :meth:`stop` is called."""
        while not self.stopped:
            async with aiohttp.ClientSession() as session:
                async with session.post(f"https://listcord.gg/api/bot/{self.bot.user.id}/stats", headers={'Authorization': self.token}, json={'server_count': len(self.bot.guilds)}) as result:
                    if result.status != 200:
                        self.emit('error', await result.json())
                    else:
                        self.emit('post', await result.json())
            # interval is in milliseconds; asyncio.sleep expects seconds.
            await asyncio.sleep(self.interval / 1000)

    def start(self):
        """Allow the posting loop to run (takes effect on the next check)."""
        self.stopped = False

    def stop(self):
        """Request the posting loop to stop after the current iteration."""
        self.stopped = True
| [
"aiohttp.ClientSession",
"asyncio.sleep"
] | [((1416, 1439), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (1437, 1439), False, 'import asyncio, aiohttp, discord\n'), ((1819, 1847), 'asyncio.sleep', 'asyncio.sleep', (['self.interval'], {}), '(self.interval)\n', (1832, 1847), False, 'import asyncio, aiohttp, discord\n')] |
from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from animais.models import Animal
class AnimaisTestCase(LiveServerTestCase):
    """Functional (Selenium) tests for the animal search page."""

    def setUp(self):
        """Start a headless Chrome driver and seed one Animal record."""
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        self.browser = webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=chrome_options)
        self.animal = Animal.objects.create(
            nome_animal='Leão',
            predador='Sim',
            venenoso='Não',
            domestico='Não'
        )

    def tearDown(self) -> None:
        self.browser.quit()

    def test_busca_animal(self):
        """A user can search for an animal by name and see its traits."""
        # Open the app served by LiveServerTestCase (get() returns None,
        # so the original's unused `home_page` binding was dropped).
        self.browser.get(self.live_server_url)
        # The navbar brand identifies the application.
        brand_element = self.browser.find_element_by_css_selector('.navbar')
        self.assertEqual('Busca Animal', brand_element.text)
        # The search input shows a helpful placeholder.
        buscar_animal_input = self.browser.find_element_by_css_selector('input#buscar-animal')
        self.assertEqual(buscar_animal_input.get_attribute('placeholder'), "Exemplo: leão, urso...")
        # Search for the seeded animal and submit the form.
        buscar_animal_input.send_keys('leão')
        self.browser.find_element_by_css_selector('form button').click()
        # At least four characteristic entries should be listed.
        caracteristicas = self.browser.find_elements_by_css_selector('.result-description')
        self.assertGreater(len(caracteristicas), 3)
| [
"selenium.webdriver.chrome.options.Options",
"animais.models.Animal.objects.create",
"selenium.webdriver.Chrome"
] | [((253, 262), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (260, 262), False, 'from selenium.webdriver.chrome.options import Options\n'), ((336, 424), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'executable_path': '"""chromedriver.exe"""', 'chrome_options': 'chrome_options'}), "(executable_path='chromedriver.exe', chrome_options=\n chrome_options)\n", (352, 424), False, 'from selenium import webdriver\n'), ((443, 537), 'animais.models.Animal.objects.create', 'Animal.objects.create', ([], {'nome_animal': '"""Leão"""', 'predador': '"""Sim"""', 'venenoso': '"""Não"""', 'domestico': '"""Não"""'}), "(nome_animal='Leão', predador='Sim', venenoso='Não',\n domestico='Não')\n", (464, 537), False, 'from animais.models import Animal\n')] |
import logging
import os

# Configure root logging once at import time; verbosity comes from the
# LOG_LEVEL environment variable (default: INFO).
logging.basicConfig(level=os.environ.get('LOG_LEVEL', 'INFO'), format='[python %(name)s pid: %(process)d] %(levelname)s: %(message)s')

# Module-level logger named after this module, per the logging docs.
logger = logging.getLogger(__name__)
# Lazy %-style arguments: the message is only rendered if the record is
# actually emitted (cheaper than pre-formatting with str.format).
logger.info('%s imported', __name__)
| [
"logging.getLogger",
"os.environ.get"
] | [((170, 197), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (187, 197), False, 'import logging\n'), ((52, 87), 'os.environ.get', 'os.environ.get', (['"""LOG_LEVEL"""', '"""INFO"""'], {}), "('LOG_LEVEL', 'INFO')\n", (66, 87), False, 'import os\n')] |
import os
import constants.constants as const
# Service name is shared via the project-wide constants module.
SVC_NAME = const.SVC_NAME

# Path to the serialized CRNN model; overridable through MODEL_PATH.
MODEL_CONFIG = {
    'model_path': os.getenv('MODEL_PATH', 'data/model/crnn_model.h5')
}

# Logging destination and verbosity, all overridable through the environment.
LOGGER_CONFIG = {
    'log_level': os.getenv('LOG_LEVEL', 'DEBUG'),
    'log_handle': os.getenv('LOG_HANDLE', 'file'),
    'log_path': os.getenv('LOG_PATH', 'log/'),
    'log_file': os.getenv('LOG_FILE', SVC_NAME)
}

# HTTP server settings.  Environment variables always arrive as strings, so
# cast them explicitly: previously 'port'/'workers' were int only when the
# default was used and str whenever the env var was set.
SERVICE_CONFIG = {
    'port': int(os.getenv('PORT', 8000)),
    'workers': int(os.getenv('WORKERS', 1))
}
"os.getenv"
] | [((115, 166), 'os.getenv', 'os.getenv', (['"""MODEL_PATH"""', '"""data/model/crnn_model.h5"""'], {}), "('MODEL_PATH', 'data/model/crnn_model.h5')\n", (124, 166), False, 'import os\n'), ((209, 240), 'os.getenv', 'os.getenv', (['"""LOG_LEVEL"""', '"""DEBUG"""'], {}), "('LOG_LEVEL', 'DEBUG')\n", (218, 240), False, 'import os\n'), ((261, 292), 'os.getenv', 'os.getenv', (['"""LOG_HANDLE"""', '"""file"""'], {}), "('LOG_HANDLE', 'file')\n", (270, 292), False, 'import os\n'), ((311, 340), 'os.getenv', 'os.getenv', (['"""LOG_PATH"""', '"""log/"""'], {}), "('LOG_PATH', 'log/')\n", (320, 340), False, 'import os\n'), ((359, 390), 'os.getenv', 'os.getenv', (['"""LOG_FILE"""', 'SVC_NAME'], {}), "('LOG_FILE', SVC_NAME)\n", (368, 390), False, 'import os\n'), ((429, 452), 'os.getenv', 'os.getenv', (['"""PORT"""', '(8000)'], {}), "('PORT', 8000)\n", (438, 452), False, 'import os\n'), ((470, 493), 'os.getenv', 'os.getenv', (['"""WORKERS"""', '(1)'], {}), "('WORKERS', 1)\n", (479, 493), False, 'import os\n')] |
import logging
l = logging.getLogger("archr.analyzers.datascout")
from ..errors import ArchrError
from . import Analyzer
# Keystone engine 0.9.2 (incorrectly) defaults to radix 16. so we'd better off only using 0x-prefixed integers from now.
# See the related PR: https://github.com/keystone-engine/keystone/pull/382
# and the related issue: https://github.com/keystone-engine/keystone/issues/436
class DataScoutAnalyzer(Analyzer):
    """
    Grabs the environment and auxiliary vector from the target.

    It injects small, architecture-specific shellcode snippets (run inside the
    target via ``shellcode_context``) that dump /proc/self/{cmdline, environ,
    auxv, maps} to stdout, then parses the captured output.  Supported
    architectures: x86_64, i386, mips, mipsel, arm.
    """
    REQUIRED_IMPLANT = "shellphish_qemu"
    def __init__(self, target, analyzer=None):
        # target: the archr target to scout.
        # analyzer: optional analyzer whose launch command is used instead of
        #           the target's own arguments (see run_shellcode()).
        super().__init__(target)
        # Lazily-populated caches filled by fire().
        self.env = None
        self.argv = None
        self.auxv = None
        self.map = None
        self.analyzer = analyzer
    def _pushstr(self, s):
        """
        push a string onto stack

        Returns assembly source that pushes the NUL-terminated string ``s``
        onto the target stack, word by word.
        """
        def _cutstr(bits, little=True):
            # Split s into word-sized, NUL-padded chunks, hex-encoded in the
            # target's byte order, reversed so pushing them rebuilds the string
            # in memory order; the leading "0" word NUL-terminates the string.
            w = bits // 8  # word size in bytes
            byte_order = -1 if little else 1
            n = ["0"] + [s[i:i + w].ljust(w, "\0")[::byte_order].encode('utf-8').hex() for i in range(0, len(s), w)][::-1]
            return n
        if self.target.target_arch == 'x86_64':
            elems = _cutstr(64)
            return "".join("mov rax, 0x%s; push rax; " % word for word in elems)
        elif self.target.target_arch == 'i386':
            elems = _cutstr(32)
            return "".join("mov eax, 0x%s; push eax; " % word for word in elems)
        elif self.target.target_arch in ('mips', 'mipsel'):
            # mips is big-endian; mipsel is little-endian.
            elems = _cutstr(32, little=self.target.target_arch != 'mips')
            return "".join("li $t0, 0x%s; addi $sp, $sp, -4; sw $t0, 0($sp);" % word for word in elems)
        elif self.target.target_arch == 'arm':
            elems = _cutstr(32)
            return "".join(f"movw r0, #0x{word} & 0xffff; movt r0, #0x{word} >> 16; push {{r0}};" for word in elems)
        else:
            raise NotImplementedError()
    def read_file_shellcode(self, filename):
        """
        shellcode to read the content of a file

        The generated snippet opens ``filename`` read-only and copies it to
        stdout in 0x1000-byte chunks.
        """
        if self.target.target_arch == 'x86_64':
            return (
                self._pushstr(filename) +
                "mov rdi, rsp; xor rsi, rsi; xor rdx, rdx; mov rax, 2; syscall;" +  # fd = open(path, O_RDONLY, 0)
                "mov r12, rax; sub rsp, 0x1000;" +  # alloca 0x1000
                "loop_head:" +
                "xor rax, rax; mov rdi, r12; mov rsi, rsp; mov rdx, 0x1000; syscall;" +  # n = read(fd, rsp, 0x1000)
                "mov r13, rax;" +  # save n
                "mov rax, 1; mov rdi, 1; mov rsi, rsp; mov rdx, r13; syscall;" +  # write(1, rsp, n)
                "test r13, r13; jnz loop_head;"  # loop until we are done with the file
            )
        elif self.target.target_arch == 'i386':
            return (
                self._pushstr(filename) +
                "mov ebx, esp; xor ecx, ecx; xor edx, edx; mov eax, 5; int 0x80;" +  # n = open(path, O_RDONLY, 0)
                "mov esi, eax; sub esp, 0x1000;" +  # alloca 0x1000, fd = esi
                "loop_head:" +
                "mov eax, 3; mov ebx, esi; mov ecx, esp; mov edx, 0x1000; int 0x80;" +  # n = read(fd, rsp, 0x1000)
                "mov edi, eax;" +  # save n
                "mov eax, 4; mov ebx, 1; mov ecx, esp; mov edx, edi; int 0x80;" +  # write(1, rsp, n)
                "test edi, edi; jnz loop_head;"  # loop until we are done with the file
            )
        elif self.target.target_arch in ('mips', 'mipsel'):
            return (
                self._pushstr(filename) +
                "move $a0, $sp; xor $a1, $a1, $a1; xor $a2, $a2, $a2; li $v0, 0xfa5; syscall;" +  # n = open(path, O_RDONLY, 0)
                "move $s0, $v0; li $a0, 0x1000; sub $sp, $sp, $a0;" +  # alloca 0x1000, fd = $s0
                "loop_head:" +
                "li $v0, 0xfa3; move $a0, $s0; move $a1, $sp; li $a2, 0x1000; syscall;" +  # n = read(fd, rsp, 0x1000)
                "move $s1, $v0;" +  # save n
                "li $v0, 0xfa4; li $a0, 1; move $a1, $sp; move $a2, $s1; syscall;" +  # write(1, rsp, n)
                "bne $s1, 0, loop_head;"  # loop until we are done with the file
            )
        elif self.target.target_arch == 'arm':
            return (
                self._pushstr(filename) +
                "mov r0, sp; eor r1, r1; eor r2, r2; mov r7, #5; svc 0;" +  # n = open(path, O_RDONLY, 0)
                "mov r8, r0; sub sp, sp, 0x1000;" +  # alloca 0x1000, fd = $r8
                "loop_head:" +
                "mov r7, #3; mov r0, r8; mov r1, sp; mov r2, 0x1000; svc 0;" +  # n = read(fd, rsp, 0x1000)
                "mov r9, r0;" +  # save n to r9
                "mov r7, #4; mov r0, 1; mov r1, sp; mov r2, r9; svc 0;" +  # write(1, rsp, n)
                "cmp r9, #0; bne loop_head;"  # loop until we are done with the file
            )
        else:
            raise NotImplementedError("Unknown target architecure: \"%s\"!" % self.target.target_arch)
    def echo_shellcode(self, what):
        """Return shellcode that writes the literal string ``what`` to stdout."""
        if self.target.target_arch == 'x86_64':
            return (
                self._pushstr(what) +
                "mov rdi, 1; mov rsi, rsp; mov rdx, %#x; mov rax, 1; syscall;" % len(what)  # write(1, rsp, len(what))
            )
        elif self.target.target_arch == 'i386':
            return (
                self._pushstr(what) +
                "mov ebx, 1; mov ecx, esp; mov edx, %#x; mov eax, 4; int 0x80;" % len(what)  # write(1, esp, len(what))
            )
        elif self.target.target_arch in ('mips', 'mipsel'):
            return (
                self._pushstr(what) +
                "li $a0, 1; move $a1, $sp; li $a2, %#x; li $v0, 0xfa4; syscall;" % len(what)  # write(1, sp, len(what))
            )
        elif self.target.target_arch == 'arm':
            return (
                self._pushstr(what) +
                "mov r0, #1; mov r1, sp; mov r2, #%#x; mov r7, #4; svc 0;" % len(what)  # write(1, sp, len(what))
            )
        else:
            raise NotImplementedError()
    def brk_shellcode(self):
        """Return shellcode that grows the program break by one page.

        Pattern on every arch: n = brk(0); brk(n + 0x1000).  This makes the
        heap visible in /proc/self/maps before it is dumped.
        """
        if self.target.target_arch == 'x86_64':
            return "mov rax, 0xc; xor rdi, rdi; syscall; mov rdi, rax; add rdi, 0x1000; mov rax, 0xc; syscall;"
        elif self.target.target_arch == 'i386':
            # n = brk 0
            # brk n + 0x1000
            return "mov eax, 0x2d; xor ebx, ebx; int 0x80; mov ebx, eax; add ebx, 0x1000; mov eax, 0x2d; int 0x80;"
        elif self.target.target_arch in ('mips', 'mipsel'):
            # n = brk 0
            # brk n + 0x1000
            return "xor $a0, $a0, $a0; li $v0, 0xfcd; syscall; add $a0, $v0, 0x1000; li $v0, 0xfcd; syscall;"
        elif self.target.target_arch == 'arm':
            # n = brk 0
            # brk n + 0x1000
            return "eor r0, r0; mov r7, #0x2d; svc 0; add r0, #0x1000; mov r7, #0x2d; svc 0;"
        else:
            raise NotImplementedError()
    def exit_shellcode(self, exit_code=42):
        """Return shellcode that calls exit(exit_code) on the target arch."""
        if self.target.target_arch == 'x86_64':
            return "mov rdi, %#x; mov rax, 0x3c; syscall;" % exit_code  # exit(code)
        elif self.target.target_arch == 'i386':
            return "mov ebx, %#x; mov eax, 1; int 0x80;" % exit_code  # exit(code)
        elif self.target.target_arch in ('mips', 'mipsel'):
            return "li $a0, %#x; li $v0, 0xfa1; syscall;" % exit_code  # exit(code)
        elif self.target.target_arch == 'arm':
            return "mov r0, #%#x; mov r7, #1; svc 0;" % exit_code  # exit(code)
        else:
            raise NotImplementedError()
    def run_shellcode(self, shellcode, aslr=False, **kwargs):
        """Run ``shellcode`` inside the target and return its stdout bytes.

        The snippet is suffixed with an exit(42) stub; any other exit status
        means the injected code did not run to completion and raises
        ArchrError.
        """
        exit_code = 42
        # build the args
        if self.analyzer:
            args = self.analyzer._build_command()
        else:
            args = self.target.target_args
        # run command within the shellcode context
        with self.target.shellcode_context(args, asm_code=shellcode+self.exit_shellcode(exit_code=exit_code), aslr=aslr, **kwargs) as p:
            output, stderr = p.communicate()
            if p.returncode != exit_code:
                raise ArchrError("DataScout failed to get info from the target process.\n"
                                 "stdout: %s\nstderr: %s" % (output, stderr))
        return output
    def fire(self, aslr=False, **kwargs): #pylint:disable=arguments-differ
        """Collect and return (argv, env, auxv, memory map) from the target.

        Each piece is fetched at most once and cached on the instance.  CGC
        targets have no /proc, so empty placeholders are returned for them.
        """
        if self.target.target_os == 'cgc':
            return [], [], b'', {}
        if not self.argv:
            output = self.run_shellcode(self.read_file_shellcode("/proc/self/cmdline"), aslr=aslr, **kwargs)
            # cmdline/environ entries are NUL-separated with a trailing NUL,
            # hence the [:-1] to drop the final empty element.
            self.argv = output.split(b'\0')[:-1]
        if not self.env:
            output = self.run_shellcode(self.read_file_shellcode("/proc/self/environ"), aslr=aslr, **kwargs)
            self.env = output.split(b'\0')[:-1]
        if not self.auxv:
            output = self.run_shellcode(self.read_file_shellcode("/proc/self/auxv"), aslr=aslr, **kwargs)
            self.auxv = output
        if not self.map:
            # Grow the break first so the heap shows up in the maps dump.
            output = self.run_shellcode(self.brk_shellcode()+self.read_file_shellcode("/proc/self/maps"), aslr=aslr, **kwargs)
            self.map = parse_proc_maps(output)
        return self.argv, self.env, self.auxv, self.map
from ..utils import parse_proc_maps
| [
"logging.getLogger"
] | [((20, 66), 'logging.getLogger', 'logging.getLogger', (['"""archr.analyzers.datascout"""'], {}), "('archr.analyzers.datascout')\n", (37, 66), False, 'import logging\n')] |
import pytest
from capreolus.collection import Collection, DummyCollection
from capreolus.index import Index
from capreolus.index import AnseriniIndex
from capreolus.tests.common_fixtures import tmpdir_as_cache, dummy_index
def test_anserini_create_index(tmpdir_as_cache):
    """Building the index should flip ``exists()`` from False to True."""
    config = {"_name": "anserini", "indexstops": False, "stemmer": "porter"}
    anserini_index = AnseriniIndex(config)
    anserini_index.modules["collection"] = DummyCollection({"_name": "dummy"})
    assert not anserini_index.exists()
    anserini_index.create_index()
    assert anserini_index.exists()
def test_anserini_get_docs(tmpdir_as_cache, dummy_index):
    """``get_docs`` returns the raw text for each requested document id."""
    assert dummy_index.get_docs(["LA010189-0001"]) == [
        "Dummy Dummy Dummy Hello world, greetings from outer space!"
    ]
    expected = [
        "Dummy Dummy Dummy Hello world, greetings from outer space!",
        "Dummy LessDummy Hello world, greetings from outer space!",
    ]
    assert dummy_index.get_docs(["LA010189-0001", "LA010189-0002"]) == expected
def test_anserini_get_df(tmpdir_as_cache, dummy_index):
    """Document frequency of "hello" in the dummy collection is 2."""
    assert dummy_index.get_df("hello") == 2
def test_anserini_get_idf(tmpdir_as_cache, dummy_index):
    """IDF of "hello" matches the value precomputed for the dummy index."""
    assert dummy_index.get_idf("hello") == 0.1823215567939546
| [
"capreolus.tests.common_fixtures.dummy_index.get_idf",
"capreolus.collection.DummyCollection",
"capreolus.tests.common_fixtures.dummy_index.get_docs",
"capreolus.tests.common_fixtures.dummy_index.get_df",
"capreolus.index.AnseriniIndex"
] | [((288, 366), 'capreolus.index.AnseriniIndex', 'AnseriniIndex', (["{'_name': 'anserini', 'indexstops': False, 'stemmer': 'porter'}"], {}), "({'_name': 'anserini', 'indexstops': False, 'stemmer': 'porter'})\n", (301, 366), False, 'from capreolus.index import AnseriniIndex\n'), ((401, 436), 'capreolus.collection.DummyCollection', 'DummyCollection', (["{'_name': 'dummy'}"], {}), "({'_name': 'dummy'})\n", (416, 436), False, 'from capreolus.collection import Collection, DummyCollection\n'), ((589, 628), 'capreolus.tests.common_fixtures.dummy_index.get_docs', 'dummy_index.get_docs', (["['LA010189-0001']"], {}), "(['LA010189-0001'])\n", (609, 628), False, 'from capreolus.tests.common_fixtures import tmpdir_as_cache, dummy_index\n'), ((722, 778), 'capreolus.tests.common_fixtures.dummy_index.get_docs', 'dummy_index.get_docs', (["['LA010189-0001', 'LA010189-0002']"], {}), "(['LA010189-0001', 'LA010189-0002'])\n", (742, 778), False, 'from capreolus.tests.common_fixtures import tmpdir_as_cache, dummy_index\n'), ((1011, 1038), 'capreolus.tests.common_fixtures.dummy_index.get_df', 'dummy_index.get_df', (['"""hello"""'], {}), "('hello')\n", (1029, 1038), False, 'from capreolus.tests.common_fixtures import tmpdir_as_cache, dummy_index\n'), ((1127, 1155), 'capreolus.tests.common_fixtures.dummy_index.get_idf', 'dummy_index.get_idf', (['"""hello"""'], {}), "('hello')\n", (1146, 1155), False, 'from capreolus.tests.common_fixtures import tmpdir_as_cache, dummy_index\n')] |
import logging
try:
from asyncpg import create_pool
except ModuleNotFoundError:
logging.warning('Database not set up, install asyncpg')
from noheavenbot.utils.constants import EnvVariables
class Database:
    """Thin wrapper around asyncpg pool creation using env-based credentials."""

    @classmethod
    async def connect(cls):
        """Create and return an asyncpg connection pool.

        Every connection parameter is read from the environment through
        ``EnvVariables`` (DB_USER, DB_PASS, DB_DATABASE, DB_HOST, DB_PORT).
        """
        env_keys = {
            'user': 'DB_USER',
            'password': 'DB_PASS',
            'database': 'DB_DATABASE',
            'host': 'DB_HOST',
            'port': 'DB_PORT',
        }
        credentials = {field: EnvVariables.get(name) for field, name in env_keys.items()}
        return await create_pool(**credentials)
| [
"noheavenbot.utils.constants.EnvVariables.get",
"logging.warning",
"asyncpg.create_pool"
] | [((89, 144), 'logging.warning', 'logging.warning', (['"""Database not set up, install asyncpg"""'], {}), "('Database not set up, install asyncpg')\n", (104, 144), False, 'import logging\n'), ((294, 321), 'noheavenbot.utils.constants.EnvVariables.get', 'EnvVariables.get', (['"""DB_USER"""'], {}), "('DB_USER')\n", (310, 321), False, 'from noheavenbot.utils.constants import EnvVariables\n'), ((358, 385), 'noheavenbot.utils.constants.EnvVariables.get', 'EnvVariables.get', (['"""DB_PASS"""'], {}), "('DB_PASS')\n", (374, 385), False, 'from noheavenbot.utils.constants import EnvVariables\n'), ((422, 453), 'noheavenbot.utils.constants.EnvVariables.get', 'EnvVariables.get', (['"""DB_DATABASE"""'], {}), "('DB_DATABASE')\n", (438, 453), False, 'from noheavenbot.utils.constants import EnvVariables\n'), ((486, 513), 'noheavenbot.utils.constants.EnvVariables.get', 'EnvVariables.get', (['"""DB_HOST"""'], {}), "('DB_HOST')\n", (502, 513), False, 'from noheavenbot.utils.constants import EnvVariables\n'), ((546, 573), 'noheavenbot.utils.constants.EnvVariables.get', 'EnvVariables.get', (['"""DB_PORT"""'], {}), "('DB_PORT')\n", (562, 573), False, 'from noheavenbot.utils.constants import EnvVariables\n'), ((597, 623), 'asyncpg.create_pool', 'create_pool', ([], {}), '(**credentials)\n', (608, 623), False, 'from asyncpg import create_pool\n')] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['HostAccountUserGroupAttachmentArgs', 'HostAccountUserGroupAttachment']
@pulumi.input_type
class HostAccountUserGroupAttachmentArgs:
    """Constructor-argument bag for HostAccountUserGroupAttachment.

    NOTE: generated by the Pulumi Terraform Bridge (tfgen); values are stored
    through pulumi.set/pulumi.get rather than plain attributes.
    """
    def __init__(__self__, *,
                 host_account_ids: pulumi.Input[Sequence[pulumi.Input[str]]],
                 host_id: pulumi.Input[str],
                 instance_id: pulumi.Input[str],
                 user_group_id: pulumi.Input[str]):
        """
        The set of arguments for constructing a HostAccountUserGroupAttachment resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_ids: A list of IDs of the host accounts.
        :param pulumi.Input[str] host_id: The ID of the host.
        :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user group to manage the specified hosts and host accounts.
        :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
        """
        pulumi.set(__self__, "host_account_ids", host_account_ids)
        pulumi.set(__self__, "host_id", host_id)
        pulumi.set(__self__, "instance_id", instance_id)
        pulumi.set(__self__, "user_group_id", user_group_id)
    @property
    @pulumi.getter(name="hostAccountIds")
    def host_account_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        A list of IDs of the host accounts.
        """
        return pulumi.get(self, "host_account_ids")
    @host_account_ids.setter
    def host_account_ids(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "host_account_ids", value)
    @property
    @pulumi.getter(name="hostId")
    def host_id(self) -> pulumi.Input[str]:
        """
        The ID of the host.
        """
        return pulumi.get(self, "host_id")
    @host_id.setter
    def host_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "host_id", value)
    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> pulumi.Input[str]:
        """
        The ID of the Bastionhost instance where you want to authorize the user group to manage the specified hosts and host accounts.
        """
        return pulumi.get(self, "instance_id")
    @instance_id.setter
    def instance_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_id", value)
    @property
    @pulumi.getter(name="userGroupId")
    def user_group_id(self) -> pulumi.Input[str]:
        """
        The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
        """
        return pulumi.get(self, "user_group_id")
    @user_group_id.setter
    def user_group_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "user_group_id", value)
@pulumi.input_type
class _HostAccountUserGroupAttachmentState:
    """State bag used when looking up or importing an existing resource.

    NOTE: generated by the Pulumi Terraform Bridge (tfgen); unlike the Args
    class every field is optional here, since a lookup may filter on any
    subset of properties.
    """
    def __init__(__self__, *,
                 host_account_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 host_id: Optional[pulumi.Input[str]] = None,
                 instance_id: Optional[pulumi.Input[str]] = None,
                 user_group_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering HostAccountUserGroupAttachment resources.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_ids: A list of IDs of the host accounts.
        :param pulumi.Input[str] host_id: The ID of the host.
        :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user group to manage the specified hosts and host accounts.
        :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
        """
        if host_account_ids is not None:
            pulumi.set(__self__, "host_account_ids", host_account_ids)
        if host_id is not None:
            pulumi.set(__self__, "host_id", host_id)
        if instance_id is not None:
            pulumi.set(__self__, "instance_id", instance_id)
        if user_group_id is not None:
            pulumi.set(__self__, "user_group_id", user_group_id)
    @property
    @pulumi.getter(name="hostAccountIds")
    def host_account_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of IDs of the host accounts.
        """
        return pulumi.get(self, "host_account_ids")
    @host_account_ids.setter
    def host_account_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "host_account_ids", value)
    @property
    @pulumi.getter(name="hostId")
    def host_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the host.
        """
        return pulumi.get(self, "host_id")
    @host_id.setter
    def host_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host_id", value)
    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Bastionhost instance where you want to authorize the user group to manage the specified hosts and host accounts.
        """
        return pulumi.get(self, "instance_id")
    @instance_id.setter
    def instance_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance_id", value)
    @property
    @pulumi.getter(name="userGroupId")
    def user_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
        """
        return pulumi.get(self, "user_group_id")
    @user_group_id.setter
    def user_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_group_id", value)
class HostAccountUserGroupAttachment(pulumi.CustomResource):
    """Bastion Host resource that attaches host accounts to a user group.

    NOTE: generated by the Pulumi Terraform Bridge (tfgen); see the typed
    ``__init__`` overloads below for usage examples.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 host_account_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 host_id: Optional[pulumi.Input[str]] = None,
                 instance_id: Optional[pulumi.Input[str]] = None,
                 user_group_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a Bastion Host Host Account Attachment resource to add list host accounts into one user group.
        > **NOTE:** Available in v1.135.0+.
        ## Example Usage
        Basic Usage
        ```python
        import pulumi
        import pulumi_alicloud as alicloud
        default_host = alicloud.bastionhost.Host("defaultHost",
            instance_id="bastionhost-cn-tl32bh0no30",
            host_name=var["name"],
            active_address_type="Private",
            host_private_address="172.16.0.10",
            os_type="Linux",
            source="Local")
        default_host_account = []
        for range in [{"value": i} for i in range(0, 3)]:
            default_host_account.append(alicloud.bastionhost.HostAccount(f"defaultHostAccount-{range['value']}",
                instance_id=default_host.instance_id,
                host_account_name=f"example_value-{range['value']}",
                host_id=default_host.host_id,
                protocol_name="SSH",
                password="<PASSWORD>"))
        default_user_group = alicloud.bastionhost.UserGroup("defaultUserGroup",
            instance_id="bastionhost-cn-tl32bh0no30",
            user_group_name=var["name"])
        default_host_account_user_group_attachment = alicloud.bastionhost.HostAccountUserGroupAttachment("defaultHostAccountUserGroupAttachment",
            instance_id=default_host.instance_id,
            user_group_id=default_user_group.user_group_id,
            host_id=default_host.host_id,
            host_account_ids=[__item.host_account_id for __item in default_host_account])
        ```
        ## Import
        Bastion Host Host Account can be imported using the id, e.g.
        ```sh
         $ pulumi import alicloud:bastionhost/hostAccountUserGroupAttachment:HostAccountUserGroupAttachment example <instance_id>:<user_group_id>:<host_id>
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_ids: A list of IDs of the host accounts.
        :param pulumi.Input[str] host_id: The ID of the host.
        :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user group to manage the specified hosts and host accounts.
        :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: HostAccountUserGroupAttachmentArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Bastion Host Host Account Attachment resource to add list host accounts into one user group.
        > **NOTE:** Available in v1.135.0+.
        ## Example Usage
        Basic Usage
        ```python
        import pulumi
        import pulumi_alicloud as alicloud
        default_host = alicloud.bastionhost.Host("defaultHost",
            instance_id="bastionhost-cn-tl32bh0no30",
            host_name=var["name"],
            active_address_type="Private",
            host_private_address="172.16.0.10",
            os_type="Linux",
            source="Local")
        default_host_account = []
        for range in [{"value": i} for i in range(0, 3)]:
            default_host_account.append(alicloud.bastionhost.HostAccount(f"defaultHostAccount-{range['value']}",
                instance_id=default_host.instance_id,
                host_account_name=f"example_value-{range['value']}",
                host_id=default_host.host_id,
                protocol_name="SSH",
                password="<PASSWORD>"))
        default_user_group = alicloud.bastionhost.UserGroup("defaultUserGroup",
            instance_id="bastionhost-cn-tl32bh0no30",
            user_group_name=var["name"])
        default_host_account_user_group_attachment = alicloud.bastionhost.HostAccountUserGroupAttachment("defaultHostAccountUserGroupAttachment",
            instance_id=default_host.instance_id,
            user_group_id=default_user_group.user_group_id,
            host_id=default_host.host_id,
            host_account_ids=[__item.host_account_id for __item in default_host_account])
        ```
        ## Import
        Bastion Host Host Account can be imported using the id, e.g.
        ```sh
         $ pulumi import alicloud:bastionhost/hostAccountUserGroupAttachment:HostAccountUserGroupAttachment example <instance_id>:<user_group_id>:<host_id>
        ```
        :param str resource_name: The name of the resource.
        :param HostAccountUserGroupAttachmentArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    # Runtime dispatcher: decides which of the two typed overloads above the
    # caller used and forwards to _internal_init accordingly.
    def __init__(__self__, resource_name: str, *args, **kwargs):
        resource_args, opts = _utilities.get_resource_args_opts(HostAccountUserGroupAttachmentArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 host_account_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 host_id: Optional[pulumi.Input[str]] = None,
                 instance_id: Optional[pulumi.Input[str]] = None,
                 user_group_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """Validate required properties and register the resource with the engine."""
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = HostAccountUserGroupAttachmentArgs.__new__(HostAccountUserGroupAttachmentArgs)
            # All four properties are required unless looking up by URN.
            if host_account_ids is None and not opts.urn:
                raise TypeError("Missing required property 'host_account_ids'")
            __props__.__dict__["host_account_ids"] = host_account_ids
            if host_id is None and not opts.urn:
                raise TypeError("Missing required property 'host_id'")
            __props__.__dict__["host_id"] = host_id
            if instance_id is None and not opts.urn:
                raise TypeError("Missing required property 'instance_id'")
            __props__.__dict__["instance_id"] = instance_id
            if user_group_id is None and not opts.urn:
                raise TypeError("Missing required property 'user_group_id'")
            __props__.__dict__["user_group_id"] = user_group_id
        super(HostAccountUserGroupAttachment, __self__).__init__(
            'alicloud:bastionhost/hostAccountUserGroupAttachment:HostAccountUserGroupAttachment',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            host_account_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            host_id: Optional[pulumi.Input[str]] = None,
            instance_id: Optional[pulumi.Input[str]] = None,
            user_group_id: Optional[pulumi.Input[str]] = None) -> 'HostAccountUserGroupAttachment':
        """
        Get an existing HostAccountUserGroupAttachment resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_ids: A list of IDs of the host accounts.
        :param pulumi.Input[str] host_id: The ID of the host.
        :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user group to manage the specified hosts and host accounts.
        :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _HostAccountUserGroupAttachmentState.__new__(_HostAccountUserGroupAttachmentState)
        __props__.__dict__["host_account_ids"] = host_account_ids
        __props__.__dict__["host_id"] = host_id
        __props__.__dict__["instance_id"] = instance_id
        __props__.__dict__["user_group_id"] = user_group_id
        return HostAccountUserGroupAttachment(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="hostAccountIds")
    def host_account_ids(self) -> pulumi.Output[Sequence[str]]:
        """
        A list of IDs of the host accounts.
        """
        return pulumi.get(self, "host_account_ids")
    @property
    @pulumi.getter(name="hostId")
    def host_id(self) -> pulumi.Output[str]:
        """
        The ID of the host.
        """
        return pulumi.get(self, "host_id")
    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> pulumi.Output[str]:
        """
        The ID of the Bastionhost instance where you want to authorize the user group to manage the specified hosts and host accounts.
        """
        return pulumi.get(self, "instance_id")
    @property
    @pulumi.getter(name="userGroupId")
    def user_group_id(self) -> pulumi.Output[str]:
        """
        The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
        """
        return pulumi.get(self, "user_group_id")
| [
"pulumi.getter",
"pulumi.set",
"pulumi.ResourceOptions",
"pulumi.get"
] | [((1590, 1626), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""hostAccountIds"""'}), "(name='hostAccountIds')\n", (1603, 1626), False, 'import pulumi\n'), ((2004, 2032), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""hostId"""'}), "(name='hostId')\n", (2017, 2032), False, 'import pulumi\n'), ((2305, 2337), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""instanceId"""'}), "(name='instanceId')\n", (2318, 2337), False, 'import pulumi\n'), ((2737, 2770), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""userGroupId"""'}), "(name='userGroupId')\n", (2750, 2770), False, 'import pulumi\n'), ((4554, 4590), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""hostAccountIds"""'}), "(name='hostAccountIds')\n", (4567, 4590), False, 'import pulumi\n'), ((4988, 5016), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""hostId"""'}), "(name='hostId')\n", (5001, 5016), False, 'import pulumi\n'), ((5309, 5341), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""instanceId"""'}), "(name='instanceId')\n", (5322, 5341), False, 'import pulumi\n'), ((5761, 5794), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""userGroupId"""'}), "(name='userGroupId')\n", (5774, 5794), False, 'import pulumi\n'), ((15994, 16030), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""hostAccountIds"""'}), "(name='hostAccountIds')\n", (16007, 16030), False, 'import pulumi\n'), ((16231, 16259), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""hostId"""'}), "(name='hostId')\n", (16244, 16259), False, 'import pulumi\n'), ((16420, 16452), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""instanceId"""'}), "(name='instanceId')\n", (16433, 16452), False, 'import pulumi\n'), ((16728, 16761), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""userGroupId"""'}), "(name='userGroupId')\n", (16741, 16761), False, 'import pulumi\n'), ((1344, 1402), 'pulumi.set', 'pulumi.set', (['__self__', '"""host_account_ids"""', 'host_account_ids'], {}), "(__self__, 'host_account_ids', 
host_account_ids)\n", (1354, 1402), False, 'import pulumi\n'), ((1411, 1451), 'pulumi.set', 'pulumi.set', (['__self__', '"""host_id"""', 'host_id'], {}), "(__self__, 'host_id', host_id)\n", (1421, 1451), False, 'import pulumi\n'), ((1460, 1508), 'pulumi.set', 'pulumi.set', (['__self__', '"""instance_id"""', 'instance_id'], {}), "(__self__, 'instance_id', instance_id)\n", (1470, 1508), False, 'import pulumi\n'), ((1517, 1569), 'pulumi.set', 'pulumi.set', (['__self__', '"""user_group_id"""', 'user_group_id'], {}), "(__self__, 'user_group_id', user_group_id)\n", (1527, 1569), False, 'import pulumi\n'), ((1783, 1819), 'pulumi.get', 'pulumi.get', (['self', '"""host_account_ids"""'], {}), "(self, 'host_account_ids')\n", (1793, 1819), False, 'import pulumi\n'), ((1940, 1983), 'pulumi.set', 'pulumi.set', (['self', '"""host_account_ids"""', 'value'], {}), "(self, 'host_account_ids', value)\n", (1950, 1983), False, 'import pulumi\n'), ((2144, 2171), 'pulumi.get', 'pulumi.get', (['self', '"""host_id"""'], {}), "(self, 'host_id')\n", (2154, 2171), False, 'import pulumi\n'), ((2250, 2284), 'pulumi.set', 'pulumi.set', (['self', '"""host_id"""', 'value'], {}), "(self, 'host_id', value)\n", (2260, 2284), False, 'import pulumi\n'), ((2560, 2591), 'pulumi.get', 'pulumi.get', (['self', '"""instance_id"""'], {}), "(self, 'instance_id')\n", (2570, 2591), False, 'import pulumi\n'), ((2678, 2716), 'pulumi.set', 'pulumi.set', (['self', '"""instance_id"""', 'value'], {}), "(self, 'instance_id', value)\n", (2688, 2716), False, 'import pulumi\n'), ((2969, 3002), 'pulumi.get', 'pulumi.get', (['self', '"""user_group_id"""'], {}), "(self, 'user_group_id')\n", (2979, 3002), False, 'import pulumi\n'), ((3093, 3133), 'pulumi.set', 'pulumi.set', (['self', '"""user_group_id"""', 'value'], {}), "(self, 'user_group_id', value)\n", (3103, 3133), False, 'import pulumi\n'), ((4757, 4793), 'pulumi.get', 'pulumi.get', (['self', '"""host_account_ids"""'], {}), "(self, 'host_account_ids')\n", (4767, 4793), 
False, 'import pulumi\n'), ((4924, 4967), 'pulumi.set', 'pulumi.set', (['self', '"""host_account_ids"""', 'value'], {}), "(self, 'host_account_ids', value)\n", (4934, 4967), False, 'import pulumi\n'), ((5138, 5165), 'pulumi.get', 'pulumi.get', (['self', '"""host_id"""'], {}), "(self, 'host_id')\n", (5148, 5165), False, 'import pulumi\n'), ((5254, 5288), 'pulumi.set', 'pulumi.set', (['self', '"""host_id"""', 'value'], {}), "(self, 'host_id', value)\n", (5264, 5288), False, 'import pulumi\n'), ((5574, 5605), 'pulumi.get', 'pulumi.get', (['self', '"""instance_id"""'], {}), "(self, 'instance_id')\n", (5584, 5605), False, 'import pulumi\n'), ((5702, 5740), 'pulumi.set', 'pulumi.set', (['self', '"""instance_id"""', 'value'], {}), "(self, 'instance_id', value)\n", (5712, 5740), False, 'import pulumi\n'), ((6003, 6036), 'pulumi.get', 'pulumi.get', (['self', '"""user_group_id"""'], {}), "(self, 'user_group_id')\n", (6013, 6036), False, 'import pulumi\n'), ((6137, 6177), 'pulumi.set', 'pulumi.set', (['self', '"""user_group_id"""', 'value'], {}), "(self, 'user_group_id', value)\n", (6147, 6177), False, 'import pulumi\n'), ((16174, 16210), 'pulumi.get', 'pulumi.get', (['self', '"""host_account_ids"""'], {}), "(self, 'host_account_ids')\n", (16184, 16210), False, 'import pulumi\n'), ((16372, 16399), 'pulumi.get', 'pulumi.get', (['self', '"""host_id"""'], {}), "(self, 'host_id')\n", (16382, 16399), False, 'import pulumi\n'), ((16676, 16707), 'pulumi.get', 'pulumi.get', (['self', '"""instance_id"""'], {}), "(self, 'instance_id')\n", (16686, 16707), False, 'import pulumi\n'), ((16961, 16994), 'pulumi.get', 'pulumi.get', (['self', '"""user_group_id"""'], {}), "(self, 'user_group_id')\n", (16971, 16994), False, 'import pulumi\n'), ((4190, 4248), 'pulumi.set', 'pulumi.set', (['__self__', '"""host_account_ids"""', 'host_account_ids'], {}), "(__self__, 'host_account_ids', host_account_ids)\n", (4200, 4248), False, 'import pulumi\n'), ((4293, 4333), 'pulumi.set', 'pulumi.set', 
(['__self__', '"""host_id"""', 'host_id'], {}), "(__self__, 'host_id', host_id)\n", (4303, 4333), False, 'import pulumi\n'), ((4382, 4430), 'pulumi.set', 'pulumi.set', (['__self__', '"""instance_id"""', 'instance_id'], {}), "(__self__, 'instance_id', instance_id)\n", (4392, 4430), False, 'import pulumi\n'), ((4481, 4533), 'pulumi.set', 'pulumi.set', (['__self__', '"""user_group_id"""', 'user_group_id'], {}), "(__self__, 'user_group_id', user_group_id)\n", (4491, 4533), False, 'import pulumi\n'), ((12541, 12565), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {}), '()\n', (12563, 12565), False, 'import pulumi\n'), ((15515, 15544), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'id': 'id'}), '(id=id)\n', (15537, 15544), False, 'import pulumi\n')] |
import pytest
from coordinator.api.models import Study, Task
from coordinator.api.factories.release import ReleaseFactory
ALL_TASKS = """
query (
$state: String,
$createdBefore: Float,
$createdAfter: Float,
$orderBy:String
) {
allTasks(
state: $state,
createdBefore: $createdBefore,
createdAfter: $createdAfter,
orderBy: $orderBy
) {
edges {
node {
id
kfId
uuid
state
createdAt
}
}
}
}
"""
@pytest.mark.parametrize(
    "user_type,expected",
    [
        ("admin", 30),
        ("dev", 30),
        ("user", 20),
        ("anon", 10),
    ],
)
def test_list_all_tasks_permissions(db, test_client, user_type, expected):
    """
    ADMIN - Can query all tasks
    DEV - Can query all tasks
    USER - Can query tasks from published releases, or releases that they
        have a study in.
    anonymous - Can query tasks from published releases
    """
    study = Study(kf_id="SD_00000001")
    study.save()
    # 10 staged, 10 published, and 10 staging releases that include the
    # user's study. The return values of create_batch were previously bound
    # to an unused, repeatedly-overwritten `releases` variable; dropped.
    ReleaseFactory.create_batch(10, state="staged")
    ReleaseFactory.create_batch(10, state="published")
    ReleaseFactory.create_batch(
        10, state="staging", studies=[study]
    )
    client = test_client(user_type)
    resp = client.post("/graphql", data={"query": ALL_TASKS})
    # Each role should see exactly `expected` tasks. (The original wrapped
    # the expected counts in no-arg lambdas and called expected(); plain
    # integers are equivalent and simpler.)
    assert len(resp.json()["data"]["allTasks"]["edges"]) == expected
| [
"coordinator.api.models.Study",
"pytest.mark.parametrize",
"coordinator.api.factories.release.ReleaseFactory.create_batch"
] | [((575, 719), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""user_type,expected"""', "[('admin', lambda : 30), ('dev', lambda : 30), ('user', lambda : 20), (\n 'anon', lambda : 10)]"], {}), "('user_type,expected', [('admin', lambda : 30), (\n 'dev', lambda : 30), ('user', lambda : 20), ('anon', lambda : 10)])\n", (598, 719), False, 'import pytest\n'), ((1084, 1110), 'coordinator.api.models.Study', 'Study', ([], {'kf_id': '"""SD_00000001"""'}), "(kf_id='SD_00000001')\n", (1089, 1110), False, 'from coordinator.api.models import Study, Task\n'), ((1144, 1191), 'coordinator.api.factories.release.ReleaseFactory.create_batch', 'ReleaseFactory.create_batch', (['(10)'], {'state': '"""staged"""'}), "(10, state='staged')\n", (1171, 1191), False, 'from coordinator.api.factories.release import ReleaseFactory\n'), ((1207, 1257), 'coordinator.api.factories.release.ReleaseFactory.create_batch', 'ReleaseFactory.create_batch', (['(10)'], {'state': '"""published"""'}), "(10, state='published')\n", (1234, 1257), False, 'from coordinator.api.factories.release import ReleaseFactory\n'), ((1273, 1338), 'coordinator.api.factories.release.ReleaseFactory.create_batch', 'ReleaseFactory.create_batch', (['(10)'], {'state': '"""staging"""', 'studies': '[study]'}), "(10, state='staging', studies=[study])\n", (1300, 1338), False, 'from coordinator.api.factories.release import ReleaseFactory\n')] |
import random
print("\tWelcome to 'Guess My Number!'")
# This stores the previous guesses and tells us whether they are right
class oldGuesses(object):
    """Validator that remembers past guesses and the allowed open interval."""

    def __init__(self, low, high):
        """Remember the exclusive bounds and start with no recorded guesses."""
        self.guesses = []
        self.low = low
        self.high = high

    def addNewGuess(self, guess):
        """Record *guess*, skipping duplicates."""
        if guess not in self.guesses:
            self.guesses.append(guess)

    def checkIfPossible(self, guess):
        """Return True when *guess* is strictly between low and high and has
        not been tried before. The guess is recorded either way."""
        in_range = self.low < guess < self.high
        first_time = guess not in self.guesses
        self.addNewGuess(guess)
        return in_range and first_time
def _prompt_valid_guess(guesses):
    """Keep prompting until the player enters a new, in-range guess.

    Invalid entries print "Try Again." and re-prompt — the same behaviour
    as the two duplicated prompt loops this helper replaces.
    """
    while True:
        guess = int(input("\nTake a Guess: "))
        if guesses.checkIfPossible(guess):
            return guess
        print("Try Again.")

def game():
    """Play one round: pick a secret number, prompt with narrowing hints
    until the player finds it, then report the number of tries."""
    # Ask the User the input number
    lowest = int(input("\nWhat do you want the lowest (min) number to be? "))
    highest = int(input("\nWhat do you want the highest (max) number to be? "))
    # Telling the User the numbers again
    print("Ok, I'm thinking of a number between ", lowest, " and", highest)
    # BUG FIX: oldGuesses.checkIfPossible only accepts guesses strictly
    # between the bounds, so a secret number equal to a bound could never
    # be guessed. Draw from the open interval when it is non-empty.
    if highest - lowest > 1:
        the_number = random.randint(lowest + 1, highest - 1)
    else:
        the_number = random.randint(lowest, highest)
    # Validates range/duplicates for every guess the player makes.
    guesses = oldGuesses(lowest, highest)
    tries = 0
    guess = _prompt_valid_guess(guesses)
    tries += 1
    # Keep hinting and re-prompting until the secret number is found.
    while guess != the_number:
        if guess > the_number:
            if highest > guess:
                highest = guess
            # Give the user their hint
            print("\nLower..."
                  "\nHint:", lowest, "< x <", highest)
        else:
            if lowest < guess:
                lowest = guess
            # Give the user their hint
            print("\nHigher..."
                  "\nHint:", lowest, "< x <", highest)
        guess = _prompt_valid_guess(guesses)
        tries += 1
    # Congratulate User and tell them their score
    print("\nYou Guessed It! The number was ", the_number)
    # Change the message depending on the score
    if tries < 5:
        print("Nice Job! You did it in ", tries, " tries. Its a high score!")
    else:
        print("You did it in ", tries, " tries")
# Run the first round, then keep replaying while the player answers "y"/"Y".
game()
response = input("\nPlay Again (y/n)? ").lower()
# While Loop so the game keeps going whenever the player says y or Y
while response == "y":
    game()
    # BUG FIX: the original omitted .lower() on this second prompt, so
    # answering "Y" after any round past the first ended the loop even
    # though "Y" was accepted the first time.
    response = input("\nPlay Again (y/n)? ").lower()
# Terminating the Program
print("\nThanks for playing!")
input("Enter to Exit") | [
"random.randint"
] | [((1509, 1540), 'random.randint', 'random.randint', (['lowest', 'highest'], {}), '(lowest, highest)\n', (1523, 1540), False, 'import random\n')] |
#!/usr/bin/env python3
#coding:utf-8
import os, sys
from sys import exit
from utils.excel import read_excel_data
import utils.path as utils_path
from utils.logger import Logger
# Run all relative paths below from the tools root directory.
os.chdir(utils_path.TOOLS_ROOT_PATH)
# Expected columns (name -> cell type) of the "main" sheet in export_cmds.xlsx.
key_map = {
    "cmd"           : "string",
    "lang"          : "string",
    "script"        : "string",
    "args"          : "string",
    "output_client" : "string",
    "output_server" : "string",
}
# Per-command configuration keyed by the "cmd" column.
cmd_info_map = read_excel_data("./export_cmds.xlsx", "main", key_map, "cmd")
logger = Logger(sys.stderr)
def save_autocode(filepath, data):
    """Write generated code to *filepath*, normalising CRLF line endings.

    Parent directories are created on demand.
    """
    dirpath = os.path.dirname(filepath)
    # exist_ok avoids the check-then-create race of the original; the guard
    # covers bare filenames, where dirname is "" and makedirs would raise.
    if dirpath:
        os.makedirs(dirpath, exist_ok=True)
    data = data.replace("\r\n", "\n")
    # The context manager flushes and closes the handle even if the write
    # fails (the original leaked the handle on error).
    with open(filepath, "w") as f:
        f.write(data)
def gen_autocode_by_info(cmd_info):
    """Run the export script once per configured target (client first, then
    server) and save each run's stdout to the corresponding output path."""
    targets = (("output_client", "-t c"), ("output_server", "-t s"))
    for out_key, target_flag in targets:
        if out_key not in cmd_info:
            continue
        command = "%s %s %s %s" % (
            cmd_info["lang"], cmd_info["script"], target_flag, cmd_info["args"])
        pipe = os.popen(command)
        generated = pipe.read()
        pipe.close()
        save_autocode(cmd_info[out_key], generated)
    return True
def gen_autocode(cmd):
    """Look up *cmd* in the config map, run its export, and log the outcome."""
    info = cmd_info_map[cmd]
    logger.info("start gen %s" % cmd)
    if gen_autocode_by_info(info):
        logger.info("gen %s success" % cmd)
    else:
        logger.info("gen %s failed" % cmd)
def main():
    """Interactive prompt loop with no CLI argument; one-shot export when a
    single command name is passed on the command line."""
    argc = len(sys.argv)
    if argc == 1:
        # No argument: keep prompting for command names until interrupted.
        while True:
            gen_autocode(input("cmd:"))
    elif argc == 2:
        gen_autocode(sys.argv[1])
if __name__ == "__main__":
    main()
| [
"utils.excel.read_excel_data",
"os.path.exists",
"os.makedirs",
"os.chdir",
"os.path.dirname",
"os.popen",
"utils.logger.Logger"
] | [((179, 215), 'os.chdir', 'os.chdir', (['utils_path.TOOLS_ROOT_PATH'], {}), '(utils_path.TOOLS_ROOT_PATH)\n', (187, 215), False, 'import os, sys\n'), ((444, 505), 'utils.excel.read_excel_data', 'read_excel_data', (['"""./export_cmds.xlsx"""', '"""main"""', 'key_map', '"""cmd"""'], {}), "('./export_cmds.xlsx', 'main', key_map, 'cmd')\n", (459, 505), False, 'from utils.excel import read_excel_data\n'), ((516, 534), 'utils.logger.Logger', 'Logger', (['sys.stderr'], {}), '(sys.stderr)\n', (522, 534), False, 'from utils.logger import Logger\n'), ((585, 610), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (600, 610), False, 'import os, sys\n'), ((622, 645), 'os.path.exists', 'os.path.exists', (['dirpath'], {}), '(dirpath)\n', (636, 645), False, 'import os, sys\n'), ((655, 675), 'os.makedirs', 'os.makedirs', (['dirpath'], {}), '(dirpath)\n', (666, 675), False, 'import os, sys\n'), ((899, 993), 'os.popen', 'os.popen', (["('%s %s %s %s' % (cmd_info['lang'], cmd_info['script'], '-t c', cmd_info[\n 'args']))"], {}), "('%s %s %s %s' % (cmd_info['lang'], cmd_info['script'], '-t c',\n cmd_info['args']))\n", (907, 993), False, 'import os, sys\n'), ((1146, 1240), 'os.popen', 'os.popen', (["('%s %s %s %s' % (cmd_info['lang'], cmd_info['script'], '-t s', cmd_info[\n 'args']))"], {}), "('%s %s %s %s' % (cmd_info['lang'], cmd_info['script'], '-t s',\n cmd_info['args']))\n", (1154, 1240), False, 'import os, sys\n')] |
from django.conf.urls import url,include
from django.contrib import admin
from . import views
# URL namespace for reverse() lookups, e.g. reverse('synchro:semaphores').
app_name = 'synchro'
# Route table for the demo pages; <pk> captures a numeric id for demos.
urlpatterns =[
    url(r'^semaphores/$', views.semaphores, name='semaphores'),
    url(r'^socket/$', views.socket, name='socket'),
    url(r'^deadlocks/$', views.deadlocks, name='deadlocks'),
    url(r'^semaphores/demo/(?P<pk>[0-9]+)/$', views.sem_demo, name='sem_demo'),
    url(r'^socket/demo/(?P<pk>[0-9]+)/$', views.socket_demo, name='socket_demo'),
    url(r'^bankalgo/$', views.bankalgo, name='bankalgo'),
]
| [
"django.conf.urls.url"
] | [((141, 198), 'django.conf.urls.url', 'url', (['"""^semaphores/$"""', 'views.semaphores'], {'name': '"""semaphores"""'}), "('^semaphores/$', views.semaphores, name='semaphores')\n", (144, 198), False, 'from django.conf.urls import url, include\n'), ((206, 251), 'django.conf.urls.url', 'url', (['"""^socket/$"""', 'views.socket'], {'name': '"""socket"""'}), "('^socket/$', views.socket, name='socket')\n", (209, 251), False, 'from django.conf.urls import url, include\n'), ((259, 313), 'django.conf.urls.url', 'url', (['"""^deadlocks/$"""', 'views.deadlocks'], {'name': '"""deadlocks"""'}), "('^deadlocks/$', views.deadlocks, name='deadlocks')\n", (262, 313), False, 'from django.conf.urls import url, include\n'), ((321, 394), 'django.conf.urls.url', 'url', (['"""^semaphores/demo/(?P<pk>[0-9]+)/$"""', 'views.sem_demo'], {'name': '"""sem_demo"""'}), "('^semaphores/demo/(?P<pk>[0-9]+)/$', views.sem_demo, name='sem_demo')\n", (324, 394), False, 'from django.conf.urls import url, include\n'), ((402, 477), 'django.conf.urls.url', 'url', (['"""^socket/demo/(?P<pk>[0-9]+)/$"""', 'views.socket_demo'], {'name': '"""socket_demo"""'}), "('^socket/demo/(?P<pk>[0-9]+)/$', views.socket_demo, name='socket_demo')\n", (405, 477), False, 'from django.conf.urls import url, include\n'), ((485, 536), 'django.conf.urls.url', 'url', (['"""^bankalgo/$"""', 'views.bankalgo'], {'name': '"""bankalgo"""'}), "('^bankalgo/$', views.bankalgo, name='bankalgo')\n", (488, 536), False, 'from django.conf.urls import url, include\n')] |
import websocket
from threading import Thread
from bot.Command import Command
import time
class Bot:
    """Minimal IRC-style chat bot speaking over a websocket, with a
    name -> Command registry used to dispatch chat commands."""

    def __init__(self, username, password, host):
        self.__commands = dict()
        self._username = username
        self._password = password
        self.__host = host
        self.__threadStarted = False
        self.__thread = None

    def connect(self):
        """Open the websocket connection to the configured host."""
        self.__websocket = websocket.create_connection(self.__host)

    def join(self, channel):
        """Join an IRC channel (the leading '#' is added here)."""
        self.__websocket.send("JOIN #" + channel)

    def send(self, msg):
        """Send a raw protocol line."""
        self.__websocket.send(msg)

    def send_message_to(self, channel, message=""):
        """Send a chat message to *channel* (the leading '#' is added here)."""
        self.__websocket.send("PRIVMSG #" + channel + " :" + message)

    def start_listening(self, callback):
        """Start a background thread that feeds every received line to
        *callback*. Does nothing if a listener is already running."""
        try:
            if not self.__threadStarted:
                self.__thread = Thread(target=self.__listen_function__, args=(callback,))
                # BUG FIX: the original wrote "deamon" — a typo that merely
                # created an unused attribute. "daemon" is what actually makes
                # the listener thread exit together with the main program.
                self.__thread.daemon = True
                self.__threadStarted = True
                self.__thread.start()
        except Exception:
            # Best effort: a listener that fails to start is simply absent.
            pass

    def __listen_function__(self, callback):
        try:
            while self.__threadStarted:
                received = self.__websocket.recv()
                callback(received)
        except Exception:
            # The socket raises when closed from another thread; treat any
            # receive error as end-of-stream and let the thread finish.
            pass

    def stop_listening(self):
        """Ask the listener thread to stop and forget it."""
        self.__threadStarted = False
        self.__thread = None

    def disconnect(self):
        """Stop listening (if running) and close the websocket."""
        if self.__threadStarted:
            self.stop_listening()
        self.__websocket.close()

    def add_command(self, command):
        """Register *command* under its own name.

        Raises ValueError if *command* is not a Command instance.
        """
        if isinstance(command, Command):
            self.__commands[command.name()] = command
        else:
            raise ValueError("\"command\" must be an instance of class Command")

    def responds_to(self, cmd):
        """Return True if a command named *cmd* is registered."""
        return cmd in self.__commands

    def execute_command(self, cmd, params):
        """Run the registered command *cmd* with *params*; unknown names are
        silently ignored."""
        if self.responds_to(cmd):
            self.__commands[cmd].execute(params)
class TwitchBot(Bot):
    """Bot specialised for Twitch IRC: handles login, PING/PONG keep-alive,
    and parsing of chat lines into plain messages and '!' commands."""

    def __init__(self, username, password):
        super().__init__(username, password, "ws://irc-ws.chat.twitch.tv:80")
        # Overridable hooks; the defaults dispatch to the Command registry.
        self.on_message = self.__default_on_message
        self.on_command = self.__default_on_command
        self.unknown_command = self.__defualt_unknown_command

    def connect(self, channels=None):
        """Connect, authenticate with PASS/NICK, and join *channels*."""
        super().connect()
        super().send("PASS " + self._password)
        super().send("NICK " + self._username)
        # BUG-ISH: the original used a mutable default (channels=[]); it was
        # never mutated, but None is the safe idiom and behaves identically.
        for channel in (channels or []):
            self.join(channel)

    def start_listening(self, callback=None):
        """Start listening; with no callback, use the built-in dispatcher."""
        if callback is None:
            super().start_listening(self.dispatch)
        else:
            super().start_listening(callback)

    def dispatch(self, msg):
        """Answer server PINGs and route chat lines to the message/command
        hooks. Lines that do not parse as chat are silently ignored."""
        if msg == "PING :tmi.twitch.tv":
            super().send("PONG :tmi.twitch.tv")
        else:
            try:
                # Raw chat line shape: ":nick!... PRIVMSG #channel :content"
                name_end = msg.index("!")
                who = msg[1:name_end]
                channel_start = msg.index("#")
                channel_end = msg.index(" :")
                channel = msg[channel_start + 1:channel_end]
                content = msg[channel_end + 2:]
                if content.startswith("!"):
                    cmd, other = self.__parse_command(content + " ")
                    self.on_command(cmd.strip(), other.strip(), who, channel)
                else:
                    self.on_message(content.strip(), who, channel)
            except Exception:
                # Non-chat lines (notices, MOTD, ...) fail the parse above
                # and are intentionally dropped.
                pass

    def __parse_command(self, text):
        """Split '!cmd rest ' into (cmd, rest).

        BUG-ISH: the original named this parameter "str", shadowing the
        builtin; renamed with identical behaviour.
        """
        try:
            cmd_end = text.find(" ")
            cmd = text[1:cmd_end]
            content = text[cmd_end + 1:]
            return cmd, content
        except Exception:
            pass

    def __default_on_message(self, msg, who, channel):
        """Default message hook: ignore plain chat messages."""
        pass

    def __default_on_command(self, cmd, other, who, channel):
        """Default command hook: run a registered command or report unknown."""
        if super().responds_to(cmd):
            super().execute_command(cmd, [other, who, channel])
        else:
            self.unknown_command(cmd, who, channel)

    def __defualt_unknown_command(self, cmd, who, channel):
        # (Method name keeps the original "defualt" typo because __init__
        # binds it under this exact, name-mangled attribute.)
        # BUG FIX: Bot.send_message_to already prefixes '#', so passing
        # "#" + channel produced "PRIVMSG ##channel"; pass the bare name.
        super().send_message_to(channel, "@" + who + ", unknown command")
| [
"threading.Thread",
"websocket.create_connection"
] | [((399, 439), 'websocket.create_connection', 'websocket.create_connection', (['self.__host'], {}), '(self.__host)\n', (426, 439), False, 'import websocket\n'), ((833, 890), 'threading.Thread', 'Thread', ([], {'target': 'self.__listen_function__', 'args': '(callback,)'}), '(target=self.__listen_function__, args=(callback,))\n', (839, 890), False, 'from threading import Thread\n')] |
try: # Python 2
from Tkinter import * # noqa
except ImportError: # Python 3
from tkinter import * # noqa
import time
from rectbutton import RectButton
from serial_connection import SerialConnection
# Poll/refresh interval for the dispensing progress screen, in milliseconds.
UPDATE_MS = 20
# Display-throttle interval in milliseconds; only referenced from
# commented-out throttling code in DispensingScreen.update_screen.
DISPLAY_MS = 125
class DispensingScreen(Frame):
    """Full-screen progress view shown while a recipe is being dispensed.

    Polls the recipe every UPDATE_MS milliseconds, redraws the progress
    text, and returns to the top screen when dispensing finishes.
    """
    def __init__(self, master, recipe, amount):
        """Build the widgets, start dispensing *amount* of *recipe*, and
        schedule the first screen update."""
        super(DispensingScreen, self).__init__(master)
        self.master = master
        # NOTE(review): the serial connection is never referenced again in
        # this class — presumably kept for its side effects; confirm.
        self.ser = SerialConnection()
        self.recipe = recipe
        # Timestamp (ms) of the last redraw; only used by the commented-out
        # throttling code in update_screen.
        self.last_disp = 0.0
        self.desc = Text(self, relief=FLAT, wrap=NONE, state=DISABLED)
        backbtn = RectButton(self, text="Abbruch", command=self.handle_button_back)
        self.bgcolor = master.bgcolor
        self.configure(bg=self.bgcolor)
        # Text area fills the screen; the cancel button sits below it.
        self.desc.grid(column=0, row=0, sticky=N+E+W+S)
        backbtn.grid(column=0, row=1, padx=10, pady=10, sticky=E+W+S)
        self.grid_columnconfigure(0, weight=1)
        self.grid_rowconfigure(0, weight=1)
        recipe.startDispensing(amount)
        # pid holds the pending after() callback id so it can be cancelled.
        self.pid = self.after(UPDATE_MS, self.update_screen)
    def update_screen(self):
        """Advance dispensing, redraw the progress text, and either finish
        or schedule the next update."""
        self.pid = None
        recipe = self.recipe
        recipe.updateDispensing()
        #now = time.time() * 1000.0
        #if now - self.last_disp >= 1:
        #if now - self.last_disp >= DISPLAY_MS:
        #self.last_disp = now
        # Temporarily enable the (normally read-only) text widget to redraw.
        self.desc.config(state=NORMAL)
        self.desc.delete(0.0, END)
        self.desc.tag_config("header", background="#077", foreground="white")
        self.desc.tag_config("ingr", lmargin1=10, lmargin2=20)
        self.desc.tag_config("percent", foreground="#c44")
        self.desc.insert(END, "Dispensing: %s\n" % recipe.getName(), "header")
        for ingr in recipe.dispensing:
            self.desc.insert(END, ingr.readableDesc(metric=self.master.use_metric), "ingr")
            self.desc.insert(END, " ")
            self.desc.insert(END, "%.0f%%\n" % ingr.percentDone(), 'percent')
        self.desc.config(state=DISABLED)
        self.master.update()
        if recipe.doneDispensing():
            # Done: persist configuration and leave this screen.
            self.master.save_configs()
            self.master.screen_pop_to_top()
        else:
            self.pid = self.after(UPDATE_MS, self.update_screen)
    def handle_button_back(self):
        """Cancel dispensing ("Abbruch" button) and go back one screen."""
        if self.pid != None:
            self.after_cancel(self.pid)
        self.recipe.cancelDispensing()
        self.master.screen_pop()
| [
"rectbutton.RectButton",
"serial_connection.SerialConnection"
] | [((429, 447), 'serial_connection.SerialConnection', 'SerialConnection', ([], {}), '()\n', (445, 447), False, 'from serial_connection import SerialConnection\n'), ((595, 660), 'rectbutton.RectButton', 'RectButton', (['self'], {'text': '"""Abbruch"""', 'command': 'self.handle_button_back'}), "(self, text='Abbruch', command=self.handle_button_back)\n", (605, 660), False, 'from rectbutton import RectButton\n')] |
import unittest
import mock
import Tkinter
from cursecreator import Application
class TestNPCCreator(unittest.TestCase):
    """Smoke test for the curse-creator Tkinter Application."""
    def setUp(self):
        # NOTE(review): Tkinter.Tk() needs a display, so this fails on
        # headless CI; the root window is also never destroyed (no tearDown).
        root = Tkinter.Tk()
        self.app = Application(root)
    def test_attribute_fixer(self):
        # Per these expectations: "health" is a valid attribute, "banana" is not.
        self.assertTrue(self.app.attribute_fixer("health", 0))
        self.assertFalse(self.app.attribute_fixer("banana", 0))
self.assertFalse(self.app.attribute_fixer("banana", 0))
if __name__ == "__main__":
unittest.main() | [
"unittest.main",
"Tkinter.Tk",
"cursecreator.Application"
] | [((371, 386), 'unittest.main', 'unittest.main', ([], {}), '()\n', (384, 386), False, 'import unittest\n'), ((149, 161), 'Tkinter.Tk', 'Tkinter.Tk', ([], {}), '()\n', (159, 161), False, 'import Tkinter\n'), ((175, 192), 'cursecreator.Application', 'Application', (['root'], {}), '(root)\n', (186, 192), False, 'from cursecreator import Application\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Modbus TestKit: Implementation of Modbus protocol in python
(C)2009 - <NAME> - <EMAIL>
(C)2009 - Apidev - http://www.apidev.fr
This is distributed under GNU LGPL license, see license.txt
Make possible to write modbus TCP and RTU master and slave for testing purpose
Modbus TestKit is different from pymodbus which is another implementation of
the modbus stack in python
contributors:
----------------------------------
* OrangeTux
* denisstogl
* MELabs
* idahogray
* riaan.doorduin
* tor.sjowall
* smachin1000
* GadgetSteve
* dhoomakethu
* zerox1212
* ffreckle
* <NAME>
Please let us know if your name is missing!
"""
# Package version string.
VERSION = '0.5.4'
import logging
# Package-wide logger for modbus_tk.
LOGGER = logging.getLogger("modbus_tk")
| [
"logging.getLogger"
] | [((762, 792), 'logging.getLogger', 'logging.getLogger', (['"""modbus_tk"""'], {}), "('modbus_tk')\n", (779, 792), False, 'import logging\n')] |
from contextlib import contextmanager
from typing import Dict
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.db import models, transaction
from django.utils.functional import cached_property, lazy
from django.utils.translation import gettext_lazy as _
from c3nav.mapdata.models import Space
class UserPermissions(models.Model):
    """
    User Permissions

    One-to-one extension of the user model carrying all permission flags.
    Instances are cached for 15 minutes (see get_for_user); writes are
    serialised through a database row lock on the related user (see lock).
    """
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, primary_key=True)
    review_changesets = models.BooleanField(default=False, verbose_name=_('can review changesets'))
    direct_edit = models.BooleanField(default=False, verbose_name=_('can activate direct editing'))
    max_changeset_changes = models.PositiveSmallIntegerField(default=10, verbose_name=_('max changes per changeset'))
    editor_access = models.BooleanField(default=False, verbose_name=_('can always access editor'))
    base_mapdata_access = models.BooleanField(default=False, verbose_name=_('can always access base map data'))
    manage_map_updates = models.BooleanField(default=False, verbose_name=_('manage map updates'))
    control_panel = models.BooleanField(default=False, verbose_name=_('can access control panel'))
    grant_permissions = models.BooleanField(default=False, verbose_name=_('can grant control permissions'))
    manage_announcements = models.BooleanField(default=False, verbose_name=_('manage announcements'))
    grant_all_access = models.BooleanField(default=False, verbose_name=_('can grant access to everything'))
    grant_space_access = models.BooleanField(default=False, verbose_name=_('can grant space access'))
    review_all_reports = models.BooleanField(default=False, verbose_name=_('can review all reports'))
    review_group_reports = models.ManyToManyField('mapdata.LocationGroup', blank=True,
                                                limit_choices_to={'access_restriction': None},
                                                verbose_name=_('can review reports belonging to'))
    api_secret = models.CharField(null=True, blank=True, max_length=64, verbose_name=_('API secret'))
    class Meta:
        verbose_name = _('User Permissions')
        verbose_name_plural = _('User Permissions')
        default_related_name = 'permissions'
    def __init__(self, *args, initial=False, **kwargs):
        """With initial=True, a superuser gets every boolean flag enabled."""
        super().__init__(*args, **kwargs)
        if initial and self.user_id and self.user.is_superuser:
            for field in UserPermissions._meta.get_fields():
                if isinstance(field, models.BooleanField):
                    setattr(self, field.name, True)
    @staticmethod
    def get_cache_key(pk):
        """Cache key for a user's permissions object."""
        return 'control:permissions:%d' % pk
    @cached_property
    def review_group_ids(self):
        # Unsaved instances cannot have M2M rows yet.
        if self.pk is None:
            return ()
        return tuple(self.review_group_reports.values_list('pk', flat=True))
    @cached_property
    def can_review_reports(self):
        # True (or a truthy tuple of group ids) when any review right exists.
        return self.review_all_reports or self.review_group_ids
    @classmethod
    @contextmanager
    def lock(cls, pk):
        """Hold a database row lock on the user for the duration of the block."""
        with transaction.atomic():
            User.objects.filter(pk=pk).select_for_update()
            yield
    @classmethod
    def get_for_user(cls, user, force=False) -> 'UserPermissions':
        """Return (and cache) the permissions for *user*.

        A cached object missing any current model field (e.g. after a schema
        change) is discarded and re-fetched; the hasattr loop also rejects a
        plain cache miss (None). Missing rows yield a fresh default object
        which is cached but not saved to the database.
        """
        if not user.is_authenticated:
            return cls()
        cache_key = cls.get_cache_key(user.pk)
        result = None
        if not force:
            result = cache.get(cache_key, None)
            for field in cls._meta.get_fields():
                if not hasattr(result, field.attname):
                    result = None
                    break
            if result:
                return result
        with cls.lock(user.pk):
            result = cls.objects.filter(pk=user.pk).first()
            if not result:
                result = cls(user=user, initial=True)
            # Evaluate the cached_property so the value is computed before
            # the object is stored in the cache.
            # noinspection PyStatementEffect
            result.review_group_ids
            cache.set(cache_key, result, 900)
        return result
    def save(self, *args, **kwargs):
        """Persist under the user's row lock and refresh the cached copy."""
        with self.lock(self.user_id):
            super().save(*args, **kwargs)
            cache_key = self.get_cache_key(self.pk)
            cache.set(cache_key, self, 900)
    @property
    def can_access_base_mapdata(self):
        # The site-wide setting overrides the per-user flag.
        return settings.PUBLIC_BASE_MAPDATA or self.base_mapdata_access
# Lazily-evaluated variant of UserPermissions.get_for_user (resolved on access).
get_permissions_for_user_lazy = lazy(UserPermissions.get_for_user, UserPermissions)
class UserSpaceAccess(models.Model):
    """
    User Authorities

    Grants a user access to a single space, optionally with edit rights.
    The per-user {space_id: can_edit} mapping is cached for 15 minutes.
    """
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    space = models.ForeignKey(Space, on_delete=models.CASCADE)
    can_edit = models.BooleanField(_('can edit'), default=False)
    class Meta:
        verbose_name = _('user space access')
        verbose_name_plural = _('user space accesses')
        default_related_name = 'spaceaccesses'
        # NOTE(review): the extra parentheses do not nest — this is the
        # single tuple ('user', 'space'), which Django accepts for one
        # constraint.
        unique_together = (('user', 'space'))
    @staticmethod
    def get_cache_key(pk):
        """Cache key for a user's space-access mapping."""
        return 'control:spaceaccesses:%d' % pk
    @classmethod
    def get_for_user(cls, user, force=False) -> Dict[int, bool]:
        """Return {space_id: can_edit} for *user*, via the cache unless
        *force* is set."""
        if not user.is_authenticated:
            return {}
        cache_key = cls.get_cache_key(user.pk)
        if not force:
            # BUG FIX: the cached value here is a plain dict, so the
            # model-field hasattr() validation loop copied from
            # UserPermissions.get_for_user always failed and every call
            # missed the cache. A presence check is the correct validation
            # for a dict.
            result = cache.get(cache_key, None)
            if result is not None:
                return result
        with UserPermissions.lock(user.pk):
            result = dict(cls.objects.filter(user=user).values_list('space_id', 'can_edit'))
            cache.set(cache_key, result, 900)
        return result
    def save(self, *args, **kwargs):
        """Save under the user's row lock and invalidate the cached mapping."""
        with UserPermissions.lock(self.user_id):
            UserPermissions.objects.filter(user_id=self.user_id).select_for_update()
            super().save(*args, **kwargs)
            cache_key = self.get_cache_key(self.user_id)
            cache.delete(cache_key)
| [
"django.db.models.OneToOneField",
"django.db.transaction.atomic",
"django.db.models.ForeignKey",
"django.core.cache.cache.delete",
"django.utils.translation.gettext_lazy",
"django.contrib.auth.models.User.objects.filter",
"django.utils.functional.lazy",
"django.core.cache.cache.set",
"django.core.ca... | [((4439, 4490), 'django.utils.functional.lazy', 'lazy', (['UserPermissions.get_for_user', 'UserPermissions'], {}), '(UserPermissions.get_for_user, UserPermissions)\n', (4443, 4490), False, 'from django.utils.functional import cached_property, lazy\n'), ((458, 552), 'django.db.models.OneToOneField', 'models.OneToOneField', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.CASCADE', 'primary_key': '(True)'}), '(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,\n primary_key=True)\n', (478, 552), False, 'from django.db import models, transaction\n'), ((4578, 4647), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.CASCADE'}), '(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n', (4595, 4647), False, 'from django.db import models, transaction\n'), ((4660, 4710), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Space'], {'on_delete': 'models.CASCADE'}), '(Space, on_delete=models.CASCADE)\n', (4677, 4710), False, 'from django.db import models, transaction\n'), ((2228, 2249), 'django.utils.translation.gettext_lazy', '_', (['"""User Permissions"""'], {}), "('User Permissions')\n", (2229, 2249), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2280, 2301), 'django.utils.translation.gettext_lazy', '_', (['"""User Permissions"""'], {}), "('User Permissions')\n", (2281, 2301), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4746, 4759), 'django.utils.translation.gettext_lazy', '_', (['"""can edit"""'], {}), "('can edit')\n", (4747, 4759), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4816, 4838), 'django.utils.translation.gettext_lazy', '_', (['"""user space access"""'], {}), "('user space access')\n", (4817, 4838), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4869, 4893), 'django.utils.translation.gettext_lazy', '_', (['"""user space accesses"""'], {}), "('user space accesses')\n", (4870, 4893), 
True, 'from django.utils.translation import gettext_lazy as _\n'), ((622, 648), 'django.utils.translation.gettext_lazy', '_', (['"""can review changesets"""'], {}), "('can review changesets')\n", (623, 648), True, 'from django.utils.translation import gettext_lazy as _\n'), ((716, 748), 'django.utils.translation.gettext_lazy', '_', (['"""can activate direct editing"""'], {}), "('can activate direct editing')\n", (717, 748), True, 'from django.utils.translation import gettext_lazy as _\n'), ((836, 866), 'django.utils.translation.gettext_lazy', '_', (['"""max changes per changeset"""'], {}), "('max changes per changeset')\n", (837, 866), True, 'from django.utils.translation import gettext_lazy as _\n'), ((936, 965), 'django.utils.translation.gettext_lazy', '_', (['"""can always access editor"""'], {}), "('can always access editor')\n", (937, 965), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1041, 1077), 'django.utils.translation.gettext_lazy', '_', (['"""can always access base map data"""'], {}), "('can always access base map data')\n", (1042, 1077), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1152, 1175), 'django.utils.translation.gettext_lazy', '_', (['"""manage map updates"""'], {}), "('manage map updates')\n", (1153, 1175), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1246, 1275), 'django.utils.translation.gettext_lazy', '_', (['"""can access control panel"""'], {}), "('can access control panel')\n", (1247, 1275), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1349, 1383), 'django.utils.translation.gettext_lazy', '_', (['"""can grant control permissions"""'], {}), "('can grant control permissions')\n", (1350, 1383), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1460, 1485), 'django.utils.translation.gettext_lazy', '_', (['"""manage announcements"""'], {}), "('manage announcements')\n", (1461, 1485), True, 'from django.utils.translation import 
gettext_lazy as _\n'), ((1558, 1593), 'django.utils.translation.gettext_lazy', '_', (['"""can grant access to everything"""'], {}), "('can grant access to everything')\n", (1559, 1593), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1668, 1695), 'django.utils.translation.gettext_lazy', '_', (['"""can grant space access"""'], {}), "('can grant space access')\n", (1669, 1695), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1771, 1798), 'django.utils.translation.gettext_lazy', '_', (['"""can review all reports"""'], {}), "('can review all reports')\n", (1772, 1798), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2047, 2083), 'django.utils.translation.gettext_lazy', '_', (['"""can review reports belonging to"""'], {}), "('can review reports belonging to')\n", (2048, 2083), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2171, 2186), 'django.utils.translation.gettext_lazy', '_', (['"""API secret"""'], {}), "('API secret')\n", (2172, 2186), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3148, 3168), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (3166, 3168), False, 'from django.db import models, transaction\n'), ((3507, 3533), 'django.core.cache.cache.get', 'cache.get', (['cache_key', 'None'], {}), '(cache_key, None)\n', (3516, 3533), False, 'from django.core.cache import cache\n'), ((4009, 4042), 'django.core.cache.cache.set', 'cache.set', (['cache_key', 'result', '(900)'], {}), '(cache_key, result, 900)\n', (4018, 4042), False, 'from django.core.cache import cache\n'), ((4247, 4278), 'django.core.cache.cache.set', 'cache.set', (['cache_key', 'self', '(900)'], {}), '(cache_key, self, 900)\n', (4256, 4278), False, 'from django.core.cache import cache\n'), ((5335, 5361), 'django.core.cache.cache.get', 'cache.get', (['cache_key', 'None'], {}), '(cache_key, None)\n', (5344, 5361), False, 'from django.core.cache import cache\n'), ((5720, 
5753), 'django.core.cache.cache.set', 'cache.set', (['cache_key', 'result', '(900)'], {}), '(cache_key, result, 900)\n', (5729, 5753), False, 'from django.core.cache import cache\n'), ((6059, 6082), 'django.core.cache.cache.delete', 'cache.delete', (['cache_key'], {}), '(cache_key)\n', (6071, 6082), False, 'from django.core.cache import cache\n'), ((3182, 3208), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'pk': 'pk'}), '(pk=pk)\n', (3201, 3208), False, 'from django.contrib.auth.models import User\n')] |
#!/usr/bin/env python
# Copyright (c) 2011, <NAME> (Berlin, Germany)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer. Redistributions in binary form must
# reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import DyMat, DyMat.Export, random
# Example Modelica result files produced by different tools/versions;
# used to exercise every export backend against every file layout.
files = ('DoublePendulum_Dymola-7.4.mat',
         'DoublePendulum_OpenModelica-1.8.mat',
         'DoublePendulum_Dymola-2012.mat',
         'DoublePendulum_Dymola-2012-SaveAs.mat',
         'DoublePendulum_Dymola-2012-SaveAsPlotted.mat')
# All export format names registered in DyMat.Export.formats.
formats = DyMat.Export.formats.keys()
for fi in files:
    # open file
    df = DyMat.DyMatFile(fi)
    # pick a maximum of 30 random variable names
    n = df.names()
    x = min(len(n), 30)
    va = random.sample(df.names(), x)
    print(va)
    # do export; failures are reported but do not stop the sweep
    for fo in formats:
        print('Exporting %s to %s' % (fi, fo))
        try:
            DyMat.Export.export(fo, df, va)
        except Exception as e:
            print(e)
| [
"DyMat.Export.formats.keys",
"DyMat.Export.export",
"DyMat.DyMatFile"
] | [((1649, 1676), 'DyMat.Export.formats.keys', 'DyMat.Export.formats.keys', ([], {}), '()\n', (1674, 1676), False, 'import DyMat, DyMat.Export, random\n'), ((1721, 1740), 'DyMat.DyMatFile', 'DyMat.DyMatFile', (['fi'], {}), '(fi)\n', (1736, 1740), False, 'import DyMat, DyMat.Export, random\n'), ((2006, 2037), 'DyMat.Export.export', 'DyMat.Export.export', (['fo', 'df', 'va'], {}), '(fo, df, va)\n', (2025, 2037), False, 'import DyMat, DyMat.Export, random\n')] |
#!/usr/bin/env python
from distutils.core import setup
# NOTE(review): distutils is deprecated (PEP 632, removed in Python 3.12);
# consider migrating to setuptools.setup, which accepts the same arguments.
setup(name='longshot',
      version='0.1alpha',
      description='SMOK client connectivity library',
      author='<EMAIL>k-serwis.pl',
      author_email='<EMAIL>',
      url='https://github.com/smok-serwis/longshot-python',
      packages=['longshot', 'longshot.persistence'],
      )
"distutils.core.setup"
] | [((57, 319), 'distutils.core.setup', 'setup', ([], {'name': '"""longshot"""', 'version': '"""0.1alpha"""', 'description': '"""SMOK client connectivity library"""', 'author': '"""<EMAIL>k-serwis.pl"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/smok-serwis/longshot-python"""', 'packages': "['longshot', 'longshot.persistence']"}), "(name='longshot', version='0.1alpha', description=\n 'SMOK client connectivity library', author='<EMAIL>k-serwis.pl',\n author_email='<EMAIL>', url=\n 'https://github.com/smok-serwis/longshot-python', packages=['longshot',\n 'longshot.persistence'])\n", (62, 319), False, 'from distutils.core import setup\n')] |
import sys
from asyncio.exceptions import CancelledError
from time import sleep

from usercodex import codex

from ..core.logger import logging
from ..core.managers import edit_delete, edit_or_reply
from ..sql_helper.global_collection import (
    add_to_collectionlist,
    del_keyword_collectionlist,
    get_collectionlist_items,
)
from ..sql_helper.globals import addgvar, delgvar, gvarstatus
from . import BOTLOG, BOTLOG_CHATID, HEROKU_APP
# Module-level logger and the help-menu category shared by all commands below.
LOGS = logging.getLogger(__name__)
plugin_category = "tools"
@codex.cod_cmd(
    pattern="restart$",
    command=("restart", plugin_category),
    info={
        "header": "Restarts the bot !!",
        "usage": "{tr}restart",
    },
    disable_errors=True,
)
async def _(event):
    "Restarts the bot !!"
    # Announce the restart in the bot-log group, if one is configured.
    if BOTLOG:
        await event.client.send_message(BOTLOG_CHATID, "#RESTART \n" "Bot Restarted")
    xedoc = await edit_or_reply(
        event,
        "Restarted. `.ping` me or `.help` to check if I am online, actually it takes 1-2 min for restarting",
    )
    # Drop any stale "restart_update" marker before storing a fresh one.
    try:
        ulist = get_collectionlist_items()
        for i in ulist:
            if i == "restart_update":
                del_keyword_collectionlist("restart_update")
    except Exception as e:
        LOGS.error(e)
    # Remember chat/message ids so the bot can reply to this message after
    # it comes back up (consumed by the "notify" feature in this module).
    try:
        add_to_collectionlist("restart_update", [xedoc.chat_id, xedoc.id])
    except Exception as e:
        LOGS.error(e)
    # Disconnecting the client ends the process; the supervisor
    # (e.g. the Heroku dyno) is expected to start it again.
    try:
        delgvar("ipaddress")
        await codex.disconnect()
    except CancelledError:
        pass
    except Exception as e:
        LOGS.error(e)
@codex.cod_cmd(
    pattern="shutdown$",
    command=("shutdown", plugin_category),
    info={
        "header": "Shutdowns the bot !!",
        "description": "To turn off the dyno of heroku. you cant turn on by bot you need to got to heroku and turn on or use @hk_heroku_bot",
        "usage": "{tr}shutdown",
    },
)
async def _(event):
    "Shutdowns the bot"
    # Announce the shutdown in the bot-log group, if one is configured.
    if BOTLOG:
        await event.client.send_message(BOTLOG_CHATID, "#SHUTDOWN \n" "Bot shut down")
    await edit_or_reply(event, "`Turning off bot now ...Manually turn me on later`")
    # On Heroku, scale the worker dyno to 0; locally just exit the process.
    if HEROKU_APP is not None:
        HEROKU_APP.process_formation()["worker"].scale(0)
    else:
        sys.exit(0)
@codex.cod_cmd(
    pattern="sleep( [0-9]+)?$",
    command=("sleep", plugin_category),
    info={
        "header": "Userbot will stop working for the mentioned time.",
        "usage": "{tr}sleep <seconds>",
        "examples": "{tr}sleep 60",
    },
)
async def _(event):
    "To sleep the userbot"
    # BUG FIX: with no argument the optional regex group is None, and the
    # original check `" " not in event.pattern_match.group(1)` raised
    # `TypeError: argument of type 'NoneType' is not iterable`.
    input_str = event.pattern_match.group(1)
    if input_str is None:
        return await edit_or_reply(event, "Syntax: `.sleep time`")
    counter = int(input_str)  # int() tolerates the leading space in " 60"
    if BOTLOG:
        await event.client.send_message(
            BOTLOG_CHATID,
            "You put the bot to sleep for " + str(counter) + " seconds",
        )
    event = await edit_or_reply(event, f"`ok, let me sleep for {counter} seconds`")
    # NOTE(review): time.sleep blocks the whole event loop, making the bot
    # fully unresponsive — that appears to be the intent here; switch to
    # `await asyncio.sleep(counter)` if only this handler should pause.
    sleep(counter)
    await event.edit("`OK, I'm awake now.`")
@codex.cod_cmd(
    pattern="notify (on|off)$",
    command=("notify", plugin_category),
    info={
        "header": "To update the your chat after restart or reload .",
        "description": "Will send the ping cmd as reply to the previous last msg of (restart/reload/update cmds).",
        "usage": [
            "{tr}notify <on/off>",
        ],
    },
)
async def set_pmlog(event):
    "To update the your chat after restart or reload ."
    # BUG FIX: `edit_delete` was used here but never imported (the file only
    # imported `edit_or_reply`), so the "already disabled"/"already enabled"
    # branches raised NameError at runtime.
    input_str = event.pattern_match.group(1)
    if input_str == "off":
        # Turning off: remove the persisted flag if it exists.
        if gvarstatus("restartupdate") is None:
            return await edit_delete(event, "__Notify was already disabled__")
        delgvar("restartupdate")
        return await edit_or_reply(event, "__Notify was disabled successfully.__")
    # input_str == "on": persist the flag unless it is already set.
    if gvarstatus("restartupdate") is None:
        addgvar("restartupdate", "turn-oned")
        return await edit_or_reply(event, "__Notify was enabled successfully.__")
    await edit_delete(event, "__Notify was already enabled.__")
| [
"usercodex.codex.disconnect",
"sys.exit",
"time.sleep",
"usercodex.codex.cod_cmd"
] | [((497, 661), 'usercodex.codex.cod_cmd', 'codex.cod_cmd', ([], {'pattern': '"""restart$"""', 'command': "('restart', plugin_category)", 'info': "{'header': 'Restarts the bot !!', 'usage': '{tr}restart'}", 'disable_errors': '(True)'}), "(pattern='restart$', command=('restart', plugin_category),\n info={'header': 'Restarts the bot !!', 'usage': '{tr}restart'},\n disable_errors=True)\n", (510, 661), False, 'from usercodex import codex\n'), ((1527, 1813), 'usercodex.codex.cod_cmd', 'codex.cod_cmd', ([], {'pattern': '"""shutdown$"""', 'command': "('shutdown', plugin_category)", 'info': "{'header': 'Shutdowns the bot !!', 'description':\n 'To turn off the dyno of heroku. you cant turn on by bot you need to got to heroku and turn on or use @hk_heroku_bot'\n , 'usage': '{tr}shutdown'}"}), "(pattern='shutdown$', command=('shutdown', plugin_category),\n info={'header': 'Shutdowns the bot !!', 'description':\n 'To turn off the dyno of heroku. you cant turn on by bot you need to got to heroku and turn on or use @hk_heroku_bot'\n , 'usage': '{tr}shutdown'})\n", (1540, 1813), False, 'from usercodex import codex\n'), ((2200, 2416), 'usercodex.codex.cod_cmd', 'codex.cod_cmd', ([], {'pattern': '"""sleep( [0-9]+)?$"""', 'command': "('sleep', plugin_category)", 'info': "{'header': 'Userbot will stop working for the mentioned time.', 'usage':\n '{tr}sleep <seconds>', 'examples': '{tr}sleep 60'}"}), "(pattern='sleep( [0-9]+)?$', command=('sleep', plugin_category\n ), info={'header': 'Userbot will stop working for the mentioned time.',\n 'usage': '{tr}sleep <seconds>', 'examples': '{tr}sleep 60'})\n", (2213, 2416), False, 'from usercodex import codex\n'), ((2981, 3288), 'usercodex.codex.cod_cmd', 'codex.cod_cmd', ([], {'pattern': '"""notify (on|off)$"""', 'command': "('notify', plugin_category)", 'info': "{'header': 'To update the your chat after restart or reload .',\n 'description':\n 'Will send the ping cmd as reply to the previous last msg of (restart/reload/update cmds).'\n , 
'usage': ['{tr}notify <on/off>']}"}), "(pattern='notify (on|off)$', command=('notify',\n plugin_category), info={'header':\n 'To update the your chat after restart or reload .', 'description':\n 'Will send the ping cmd as reply to the previous last msg of (restart/reload/update cmds).'\n , 'usage': ['{tr}notify <on/off>']})\n", (2994, 3288), False, 'from usercodex import codex\n'), ((2918, 2932), 'time.sleep', 'sleep', (['counter'], {}), '(counter)\n', (2923, 2932), False, 'from time import sleep\n'), ((2185, 2196), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2193, 2196), False, 'import sys\n'), ((1416, 1434), 'usercodex.codex.disconnect', 'codex.disconnect', ([], {}), '()\n', (1432, 1434), False, 'from usercodex import codex\n')] |
from utils import load, save, path_list, DEAD_PMTS
import nets
import torch
import numpy as np
import pandas as pd
from scipy import interpolate
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter
from itertools import repeat
from multiprocessing import Pool
def neural_residual(root_dir):
    """Reconstruction residuals of the trained neural network on the test set.

    Loads the network type from ``<root_dir>/configuration.json``, restores the
    newest checkpoint under ``<root_dir>/models/``, runs it on the saved test
    tensors and returns ``(outputs - labels) * 1000`` transposed — one row per
    coordinate (x, y, z), as consumed by ``main``. The factor 1000 presumably
    converts m to mm (the axis labels in ``main`` are in mm) — TODO confirm.
    """
    # model selection
    net_type = load(root_dir + '/configuration.json')['net_type']
    if net_type == 'Net':
        net = nets.Net()
    elif net_type == 'Net2c':
        net = nets.Net2c()
    elif net_type == 'CNN1c':
        net = nets.CNN1c()
    elif net_type == 'CNN2c':
        net = nets.CNN2c()
    else:
        print('invalide net type')
        raise ValueError
    # get the latest model for neural network
    # (path_list is assumed to return sorted paths, so [-1] is the newest)
    epoch_path = path_list(root_dir + '/models/')[-1]
    model_path = path_list(epoch_path, filter='pt')[-1]
    net.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
    # get inputs, labels, outputs and residuals
    inputs = load(root_dir + '/test_inputs.tensor').float()
    labels = load(root_dir + '/test_labels.tensor').float().numpy()
    outputs = net(inputs).detach().cpu().clone().numpy()
    residuals = (outputs - labels) * 1000
    return residuals.T
def cwm_residual(root_dir):
    """Charge-weighted-mean (CWM) vertex residuals for all test events.

    The attenuation-weight interpolators are loaded from ``src/``; on first
    run they are built from ``WeightingCorrection_att.dat`` and cached with
    ``save``. Events listed in ``<root_dir>/testpaths.list`` are processed in
    five batches on a 40-worker pool; events without usable hits (for which
    ``__job`` returns False) are dropped. Returns the residual array
    transposed — one row per coordinate.
    """
    try:
        interp_r = load('src/interp_r')
        interp_z = load('src/interp_z')
    except FileNotFoundError:
        # Parse the whitespace-separated table: r, z, weight_r, weight_z.
        with open('src/WeightingCorrection_att.dat', 'r') as f:
            df = []
            while True:
                line = f.readline().split(' ')
                line = list(filter(lambda a: a != '', line))
                try:
                    line[3] = line[3][:-1]  # strip the trailing newline
                except IndexError:
                    break
                df.append(line)
            df = pd.DataFrame(df, dtype=float)
        # calculate interpolation
        R = df[0]
        Z = df[1]
        weight_R = df[2]
        weight_Z = df[3]
        interp_r = interpolate.interp2d(R, Z, weight_R, kind='linear')
        interp_z = interpolate.interp2d(R, Z, weight_Z, kind='linear')
        # Cache so subsequent runs skip the parse/fit.
        save(interp_r, 'src/interp_r')
        save(interp_z, 'src/interp_z')
    pmt_positions = load('src/pmtcoordinates_ID.json')
    testpaths = load(root_dir + '/testpaths.list')
    # multiprocessing
    p = Pool(processes=40)
    residuals = []
    total = len(testpaths)
    for i in range(5):
        print('getting cwm residuals... %i' % i)
        paths_batch = testpaths[int(0.2 * i * total):int(0.2 * (i + 1) * total)]
        residuals += p.starmap(__job, zip(paths_batch,
                                          repeat(interp_r),
                                          repeat(interp_z),
                                          repeat(pmt_positions)
                                          )
                               )
    # __job returns False for events with no usable hits; drop those.
    residuals = [r for r in residuals if r]
    return np.array(residuals).T
def __job(path, interp_r, interp_z, pmt_positions):
    """Reconstruct one event's charge-weighted-mean (CWM) vertex residual.

    Parameters
    ----------
    path : str
        Path of an event file readable by ``utils.load``.
    interp_r, interp_z : callable
        2-D interpolators returning the radial / axial attenuation weight
        for a given (r, |z|).
    pmt_positions : dict
        Mapping of PMT id (as str) to a dict with 'x', 'y', 'z' coordinates.

    Returns
    -------
    list[float] | bool
        The ``[dx, dy, dz]`` residual (reconstructed - true), or False when
        the event has no usable hits.
    """
    f = load(path)
    capture_time = f['capture_time']  # scalar value
    n_hits = int(f['photon_hits'])    # scalar value
    hit_counts = f['hit_count']       # vector value
    hit_pmts = f['hit_pmt']           # vector value
    hit_time = f['hit_time']          # vector value
    true_vertex = [f['positron_x'], f['positron_y'], f['positron_z']]
    # Accumulate per-PMT charge for pre-capture hits, skipping dead channels.
    x = np.zeros(354)
    for i in range(n_hits):
        pmt = hit_pmts[i]
        if pmt in DEAD_PMTS:
            continue
        if hit_time[i] < capture_time:
            x[pmt] += hit_counts[i]
    # if the entry is valid, reconstruct the vertex
    if sum(x) > 0:
        # charge-weighted mean of the PMT positions
        # (loop variable renamed from `hits`, which shadowed the hit count)
        reco_vertex = np.array([.0, .0, .0])
        for pmt_id, charge in enumerate(x):
            pmt_pos = pmt_positions[str(pmt_id)]
            reco_vertex += charge * np.array([pmt_pos['x'], pmt_pos['y'], pmt_pos['z']], dtype=float)
        # normalize
        reco_vertex = reco_vertex / sum(x)
        # correction 1: attenuation weights from the lookup tables
        weight1r = interp_r(np.linalg.norm(reco_vertex[:2]), abs(reco_vertex[2]))
        weight1z = interp_z(np.linalg.norm(reco_vertex[:2]), abs(reco_vertex[2]))
        reco_vertex[:2] *= weight1r
        reco_vertex[2] *= weight1z
        # correction 2: empirical linear radial correction
        weight2 = 0.8784552 - 0.0000242758 * np.linalg.norm(reco_vertex[:2])
        reco_vertex *= weight2
        return (reco_vertex - true_vertex).tolist()
    else:
        return False
def filter_nsigma(outputs, n):
    """Keep only the values lying within n standard deviations of the
    histogram peak (peak = left edge of the fullest of 200 bins)."""
    counts, edges = np.histogram(outputs, bins=200)
    peak = edges[np.argmax(counts)]
    sigma = np.std(outputs)
    lo, hi = peak - n * sigma, peak + n * sigma
    return [value for value in outputs if lo < value < hi]
def main():
    """Interactively compare CWM and neural-network vertex residuals.

    Prompts for one control group (reconstructed with the charge-weighted-mean
    method) and any number of experimental groups (neural networks), then
    draws per-coordinate residual histograms with 2-sigma-filtered standard
    deviations and saves the figure to ``MC_vis_histogram.png``.
    """
    # control group
    print('control group root: ')
    control_root = str(input())
    print('control group name:')
    control_name = str(input())
    # experimental group
    print('# of experimental groups:')
    ex_number = int(input())
    print('experimental group roots (%i):' % ex_number)
    ex_root = [str(input()) for _ in range(ex_number)]
    print('experimental group names')
    ex_names = []
    for i in range(ex_number):
        print('name for ' + ex_root[i])
        ex_names.append(str(input()))
    # get residuals
    print('calculating residuals')
    control_residual = cwm_residual(root_dir=control_root)
    ex_residuals = [neural_residual(root_dir=ex_root[i]) for i in range(ex_number)]
    # draw histograms: one subplot per coordinate (x, y, z)
    print('drawing histograms')
    fig, axes = plt.subplots(1, 3, figsize=(14, 4))
    for axis in range(3):
        axes[axis].hist(control_residual[axis],
                        bins=200,
                        density=True,
                        histtype='step',
                        linestyle=':',
                        color='black',
                        label=control_name)
        for i in range(ex_number):
            axes[axis].hist(ex_residuals[i][axis],
                            bins=200,
                            density=True,
                            histtype='step',
                            label=ex_names[i])
        # Text on filtered sigma (std after discarding values beyond 2 sigma
        # of the histogram peak, see filter_nsigma)
        control_filtered_std = np.std(filter_nsigma(control_residual[axis], n=2))
        ex_filtered_std = [np.std(filter_nsigma(ex_residuals[i][axis], n=2)) for i in range(ex_number)]
        text_std = '$\\sigma_{%s}=%.1fmm$' % (control_name, control_filtered_std)
        for i in range(ex_number):
            text_std += '\n$\\sigma_{%s}=%.1fmm$' % (ex_names[i], ex_filtered_std[i])
        axes[axis].text(200, 0.78/100,
                        text_std,
                        ha='left', va='top',
                        fontsize=8,
                        bbox=dict(boxstyle='square', fc='w'))
        # axes properties
        axis_name = ['x', 'y', 'z'][axis]
        axes[axis].set_xlabel(r'$%s_{rec} - %s_{real} $ (mm)' % (axis_name, axis_name))
        axes[axis].set_ylabel('portion')
        axes[axis].yaxis.set_major_formatter(PercentFormatter(1))
        axes[axis].set_xlim([-1000, 1000])
        axes[axis].set_ylim([0, 0.8/100])
        axes[axis].grid()
        axes[axis].legend(fontsize=8, loc='upper left')
    plt.tight_layout()
    plt.savefig('MC_vis_histogram.png')
    plt.close()
if __name__ == '__main__':
    main()
| [
"matplotlib.ticker.PercentFormatter",
"utils.load",
"numpy.array",
"numpy.linalg.norm",
"scipy.interpolate.interp2d",
"nets.Net",
"itertools.repeat",
"numpy.histogram",
"matplotlib.pyplot.close",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"nets.CNN1c",
"nets.Net2c",
"numpy.argmax",
... | [((2208, 2242), 'utils.load', 'load', (['"""src/pmtcoordinates_ID.json"""'], {}), "('src/pmtcoordinates_ID.json')\n", (2212, 2242), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((2259, 2293), 'utils.load', 'load', (["(root_dir + '/testpaths.list')"], {}), "(root_dir + '/testpaths.list')\n", (2263, 2293), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((2325, 2343), 'multiprocessing.Pool', 'Pool', ([], {'processes': '(40)'}), '(processes=40)\n', (2329, 2343), False, 'from multiprocessing import Pool\n'), ((3001, 3011), 'utils.load', 'load', (['path'], {}), '(path)\n', (3005, 3011), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((3372, 3385), 'numpy.zeros', 'np.zeros', (['(354)'], {}), '(354)\n', (3380, 3385), True, 'import numpy as np\n'), ((4527, 4558), 'numpy.histogram', 'np.histogram', (['outputs'], {'bins': '(200)'}), '(outputs, bins=200)\n', (4539, 4558), True, 'import numpy as np\n'), ((4600, 4615), 'numpy.std', 'np.std', (['outputs'], {}), '(outputs)\n', (4606, 4615), True, 'import numpy as np\n'), ((5514, 5549), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(14, 4)'}), '(1, 3, figsize=(14, 4))\n', (5526, 5549), True, 'import matplotlib.pyplot as plt\n'), ((7194, 7212), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7210, 7212), True, 'import matplotlib.pyplot as plt\n'), ((7217, 7252), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""MC_vis_histogram.png"""'], {}), "('MC_vis_histogram.png')\n", (7228, 7252), True, 'import matplotlib.pyplot as plt\n'), ((7257, 7268), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7266, 7268), True, 'import matplotlib.pyplot as plt\n'), ((358, 396), 'utils.load', 'load', (["(root_dir + '/configuration.json')"], {}), "(root_dir + '/configuration.json')\n", (362, 396), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((449, 459), 'nets.Net', 'nets.Net', ([], {}), '()\n', (457, 459), 
False, 'import nets\n'), ((765, 797), 'utils.path_list', 'path_list', (["(root_dir + '/models/')"], {}), "(root_dir + '/models/')\n", (774, 797), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((819, 853), 'utils.path_list', 'path_list', (['epoch_path'], {'filter': '"""pt"""'}), "(epoch_path, filter='pt')\n", (828, 853), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((1298, 1318), 'utils.load', 'load', (['"""src/interp_r"""'], {}), "('src/interp_r')\n", (1302, 1318), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((1338, 1358), 'utils.load', 'load', (['"""src/interp_z"""'], {}), "('src/interp_z')\n", (1342, 1358), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((2917, 2936), 'numpy.array', 'np.array', (['residuals'], {}), '(residuals)\n', (2925, 2936), True, 'import numpy as np\n'), ((3726, 3751), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (3734, 3751), True, 'import numpy as np\n'), ((4575, 4588), 'numpy.argmax', 'np.argmax', (['ns'], {}), '(ns)\n', (4584, 4588), True, 'import numpy as np\n'), ((504, 516), 'nets.Net2c', 'nets.Net2c', ([], {}), '()\n', (514, 516), False, 'import nets\n'), ((1002, 1040), 'utils.load', 'load', (["(root_dir + '/test_inputs.tensor')"], {}), "(root_dir + '/test_inputs.tensor')\n", (1006, 1040), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((4056, 4087), 'numpy.linalg.norm', 'np.linalg.norm', (['reco_vertex[:2]'], {}), '(reco_vertex[:2])\n', (4070, 4087), True, 'import numpy as np\n'), ((4138, 4169), 'numpy.linalg.norm', 'np.linalg.norm', (['reco_vertex[:2]'], {}), '(reco_vertex[:2])\n', (4152, 4169), True, 'import numpy as np\n'), ((7001, 7020), 'matplotlib.ticker.PercentFormatter', 'PercentFormatter', (['(1)'], {}), '(1)\n', (7017, 7020), False, 'from matplotlib.ticker import PercentFormatter\n'), ((561, 573), 'nets.CNN1c', 'nets.CNN1c', ([], {}), '()\n', (571, 573), False, 'import nets\n'), ((918, 937), 
'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (930, 937), False, 'import torch\n'), ((1779, 1808), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {'dtype': 'float'}), '(df, dtype=float)\n', (1791, 1808), True, 'import pandas as pd\n'), ((1974, 2025), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['R', 'Z', 'weight_R'], {'kind': '"""linear"""'}), "(R, Z, weight_R, kind='linear')\n", (1994, 2025), False, 'from scipy import interpolate\n'), ((2049, 2100), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['R', 'Z', 'weight_Z'], {'kind': '"""linear"""'}), "(R, Z, weight_Z, kind='linear')\n", (2069, 2100), False, 'from scipy import interpolate\n'), ((2113, 2143), 'utils.save', 'save', (['interp_r', '"""src/interp_r"""'], {}), "(interp_r, 'src/interp_r')\n", (2117, 2143), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((2156, 2186), 'utils.save', 'save', (['interp_z', '"""src/interp_z"""'], {}), "(interp_z, 'src/interp_z')\n", (2160, 2186), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((2641, 2657), 'itertools.repeat', 'repeat', (['interp_r'], {}), '(interp_r)\n', (2647, 2657), False, 'from itertools import repeat\n'), ((2701, 2717), 'itertools.repeat', 'repeat', (['interp_z'], {}), '(interp_z)\n', (2707, 2717), False, 'from itertools import repeat\n'), ((2761, 2782), 'itertools.repeat', 'repeat', (['pmt_positions'], {}), '(pmt_positions)\n', (2767, 2782), False, 'from itertools import repeat\n'), ((3874, 3939), 'numpy.array', 'np.array', (["[pmt_pos['x'], pmt_pos['y'], pmt_pos['z']]"], {'dtype': 'float'}), "([pmt_pos['x'], pmt_pos['y'], pmt_pos['z']], dtype=float)\n", (3882, 3939), True, 'import numpy as np\n'), ((4332, 4363), 'numpy.linalg.norm', 'np.linalg.norm', (['reco_vertex[:2]'], {}), '(reco_vertex[:2])\n', (4346, 4363), True, 'import numpy as np\n'), ((618, 630), 'nets.CNN2c', 'nets.CNN2c', ([], {}), '()\n', (628, 630), False, 'import nets\n'), ((1062, 1100), 'utils.load', 'load', (["(root_dir + 
'/test_labels.tensor')"], {}), "(root_dir + '/test_labels.tensor')\n", (1066, 1100), False, 'from utils import load, save, path_list, DEAD_PMTS\n')] |
import datetime as dt
import unittest
from AShareData import set_global_config
from AShareData.model import *
class MyTestCase(unittest.TestCase):
    # Tests for the factor-return compositors in AShareData.model.
    def setUp(self) -> None:
        # Point AShareData at the local configuration before each test.
        set_global_config('config.json')
    def test_something(self):
        # NOTE(review): this placeholder always fails (True != False) —
        # presumably leftover IDE-template scaffolding; remove or implement.
        self.assertEqual(True, False)
    @staticmethod
    def test_FF3factor_return():
        # Compute SMB/HML factor returns for daily and monthly
        # rebalance/period marker combinations around 2021-03-09.
        model = FamaFrench3FactorModel()
        smb = SMBandHMLCompositor(model)
        date = dt.datetime(2021, 3, 9)
        pre_date = dt.datetime(2021, 3, 8)
        pre_month_date = dt.datetime(2021, 2, 26)
        smb.compute_factor_return(balance_date=pre_date, pre_date=pre_date, date=date,
                                  rebalance_marker='D', period_marker='D')
        smb.compute_factor_return(balance_date=pre_month_date, pre_date=pre_date, date=date,
                                  rebalance_marker='M', period_marker='D')
        smb.compute_factor_return(balance_date=pre_month_date, pre_date=pre_month_date, date=date,
                                  rebalance_marker='M', period_marker='M')
    @staticmethod
    def test_FFC4_factor_return():
        # Same marker combinations for the momentum (UMD) factor of the
        # 4-factor model.
        model = FamaFrenchCarhart4FactorModel()
        umd = UMDCompositor(model)
        date = dt.datetime(2021, 3, 9)
        pre_date = dt.datetime(2021, 3, 8)
        pre_month_date = dt.datetime(2021, 2, 26)
        umd.compute_factor_return(balance_date=pre_date, pre_date=pre_date, date=date,
                                  rebalance_marker='D', period_marker='D')
        umd.compute_factor_return(balance_date=pre_month_date, pre_date=pre_date, date=date,
                                  rebalance_marker='M', period_marker='D')
        umd.compute_factor_return(balance_date=pre_month_date, pre_date=pre_month_date, date=date,
                                  rebalance_marker='M', period_marker='M')
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"datetime.datetime",
"AShareData.set_global_config"
] | [((1865, 1880), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1878, 1880), False, 'import unittest\n'), ((187, 219), 'AShareData.set_global_config', 'set_global_config', (['"""config.json"""'], {}), "('config.json')\n", (204, 219), False, 'from AShareData import set_global_config\n'), ((438, 461), 'datetime.datetime', 'dt.datetime', (['(2021)', '(3)', '(9)'], {}), '(2021, 3, 9)\n', (449, 461), True, 'import datetime as dt\n'), ((481, 504), 'datetime.datetime', 'dt.datetime', (['(2021)', '(3)', '(8)'], {}), '(2021, 3, 8)\n', (492, 504), True, 'import datetime as dt\n'), ((530, 554), 'datetime.datetime', 'dt.datetime', (['(2021)', '(2)', '(26)'], {}), '(2021, 2, 26)\n', (541, 554), True, 'import datetime as dt\n'), ((1211, 1234), 'datetime.datetime', 'dt.datetime', (['(2021)', '(3)', '(9)'], {}), '(2021, 3, 9)\n', (1222, 1234), True, 'import datetime as dt\n'), ((1254, 1277), 'datetime.datetime', 'dt.datetime', (['(2021)', '(3)', '(8)'], {}), '(2021, 3, 8)\n', (1265, 1277), True, 'import datetime as dt\n'), ((1303, 1327), 'datetime.datetime', 'dt.datetime', (['(2021)', '(2)', '(26)'], {}), '(2021, 2, 26)\n', (1314, 1327), True, 'import datetime as dt\n')] |
import torch
from collections.abc import Iterable
def _get_layers(model, all_layers=None, all_names=None, top_name=None, fn=None, sep='_'):
"""Auxiliar function. Recursive method for getting all in the model for which `fn(layer)=True`."""
if all_names is None:
all_names = []
if all_layers is None:
all_layers = []
if top_name is None:
top_name = ''
if fn is None:
fn = lambda l: True
for name, layer in model.named_children():
if list(layer.children()):
all_names, all_layers = _get_layers(layer, all_layers, all_names, top_name+name+sep, fn)
else:
if fn(layer):
all_names.append(top_name + name)
all_layers.append(layer)
return all_names, all_layers
def get_layers(model, fn=None, sep='_'):
    """Depth-first search for all leaf layers of `model` with `fn(layer)` true.

    Returns a list of ('name', Module) tuples. For nested blocks the name is a
    single string whose levels are joined by `sep` (default '_'), for instance
    'layer1_0_conv1' for the blocks `layer1`, `0`, `conv1`.
    """
    names, layers = _get_layers(model, fn=fn, sep=sep)
    return [pair for pair in zip(names, layers)]
def replace_layer(model, layer_name, replace_fn):
    """Replace one (possibly nested) sub-layer of a torch.nn.Module.

    `layer_name` is a reversed list of child names (deepest level last, so
    `.pop()` walks outside-in); the list is consumed by this call. With an
    empty list, `model` itself is replaced by `replace_fn(model)`.
    """
    if not layer_name:
        return replace_fn(model)
    key = layer_name.pop()
    model._modules[key] = replace_layer(model._modules[key], layer_name, replace_fn)
    return model
def replace_all_layers(model, layers, replace_fn, sep='_'):
    """Replace every layer named in `layers` (names joined by `sep`) with
    `new_layer = replace_fn(old_layer)`, returning the updated model."""
    for layer_name in layers:
        path = layer_name.split(sep)
        model = replace_layer(model, path[::-1], replace_fn)
    return model
class SaveIntermediaryValues(object):
    """Hook-based recorder of intermediary activations.

    For every layer of a model selected by `is_layer_fn`, a forward hook is
    installed that applies `collapsing_fn(input, layer)` and writes the result
    batch-by-batch into a pre-allocated buffer holding `n_samples` rows.
    """
    def __init__(self, collapsing_fn, is_layer_fn, n_samples):
        self.collapsing_fn = collapsing_fn  # maps (input tensor, layer) -> value to store
        self.is_layer_fn = is_layer_fn      # predicate selecting which layers to hook
        self.batch_dim = 0                  # samples are stacked along this axis
        self.n_samples = n_samples          # rows pre-allocated per storage buffer
        self.counter = None                 # per-layer count of rows already written
        self.is_first_execution = None      # per-layer flag: storage not yet allocated
        self.storage = None                 # per-layer buffers, allocated lazily
        self.layer_names = None

    def save_forward_hooks(self, model):
        """Install the saving forward hooks on all selected layers of `model`."""
        all_layers = get_layers(model, fn=self.is_layer_fn)
        self.layer_names = list(list(zip(*all_layers))[0])
        self.storage = {name: None for name in self.layer_names}
        self.counter = {name: 0 for name in self.layer_names}
        self.is_first_execution = {name: True for name in self.layer_names}
        for name in self.layer_names:
            model = replace_all_layers(model, [name], replace_fn=self.hook(name))
        return model

    def hook(self, name):
        """Return a replace_fn that registers the saving forward hook on a layer."""
        def register_forward_hook(layer):
            def forward_hook(_self, inp, _out):
                x = self.collapsing_fn(inp[0], _self)
                if self.is_first_execution[name]:
                    # Lazy allocation: shapes are only known at first forward.
                    self.is_first_execution[name] = False
                    self.storage[name] = self.init_storage(x)
                delta = self.update_storage(x, self.storage[name], self.counter[name])
                self.counter[name] += delta
            layer.register_forward_hook(forward_hook)
            return layer
        return register_forward_hook

    def init_storage(self, x):
        """Allocate zeroed storage shaped like `x` but with `n_samples` rows.

        Supports tensors, dicts of storables and other iterables (tuples).
        """
        if type(x) == torch.Tensor:
            shape = list(x.shape)
            shape[self.batch_dim] = self.n_samples
            return torch.zeros(shape, dtype=x.dtype)
        elif type(x) == dict:
            return {key: self.init_storage(value) for key, value in x.items()}
        elif isinstance(x, Iterable):
            return tuple(self.init_storage(xx) for xx in x)
        else:
            raise NotImplementedError()

    def update_storage(self, x, storage, counter):
        """Copy batch `x` into `storage` starting at row `counter`.

        Returns the batch size written (all nested entries are assumed to
        share the same batch size — TODO confirm).
        """
        if type(x) == torch.Tensor:
            delta = x.shape[self.batch_dim]
            storage[counter:counter + delta, ...] = x
            return delta
        elif type(x) == dict:
            delta = 0
            for key, value in x.items():
                delta = self.update_storage(value, storage[key], counter)
            return delta
        elif isinstance(x, Iterable):
            delta = 0
            iter_storage = iter(storage)
            for xx in x:
                delta = self.update_storage(xx, next(iter_storage), counter)
            return delta
        else:
            raise NotImplementedError()

    def reset_storage(self, storage=None):
        """Zero out `storage` in place (defaults to all recorded buffers)."""
        if storage is None:
            storage = self.storage
        if type(storage) == torch.Tensor:
            storage[...] = 0
        elif type(storage) == dict:
            for value in storage.values():
                self.reset_storage(value)
        elif isinstance(storage, Iterable):
            # BUG FIX: the original iterated over an undefined name `x` here,
            # raising NameError for tuple/list storage.
            for item in storage:
                self.reset_storage(item)
        else:
            raise NotImplementedError()

    def reset(self):
        """Reset counters and buffers so a new recording pass can start."""
        self.counter = {name: 0 for name in self.layer_names}
        self.is_first_execution = {name: True for name in self.layer_names}
        self.reset_storage()
"torch.zeros"
] | [((4141, 4174), 'torch.zeros', 'torch.zeros', (['shape'], {'dtype': 'x.dtype'}), '(shape, dtype=x.dtype)\n', (4152, 4174), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.evolve.functionslot The *function slot* concept is large used by Pyevolve, the idea
# is simple, each genetic operator or any operator, can be assigned
# to a slot, by this way, we can add more than simple one operator,
# we can have for example, two or more mutator operators at same time,
# two or more evaluation functions, etc. In this :mod:`FunctionSlot` module,
# you'll find the class :class:`FunctionSlot.FunctionSlot`, which is the slot class.
# -----------------------------------------------------------------
# Import standard modules
from types import BooleanType
# Import other evolve modules
import utils
# Import the relevant PTS classes and modules
from ..core.tools.random import prng
# -----------------------------------------------------------------
class FunctionSlot(object):
    """
    FunctionSlot Class - The function slot

    A slot holds one or more operator functions (mutators, evaluators, ...)
    that can be applied together or, with random apply, picked by weight.

    Example:
       >>> genome.evaluator.set(eval_func)
       >>> genome.evaluator[0]
       <function eval_func at 0x018C8930>
       >>> genome.evaluator
       Slot [Evaluation Function] (Count: 1)
                Name: eval_func
       >>> genome.evaluator.clear()
       >>> genome.evaluator
       Slot [Evaluation Function] (Count: 0)
                No function

    You can add weight to functions when using the `rand_apply` parameter:
       >>> genome.evaluator.set(eval_main, 0.9)
       >>> genome.evaluator.add(eval_sec, 0.3)
       >>> genome.evaluator.setRandomApply()

    In the above example, the function *eval_main* will be called with 90% of
    probability and the *eval_sec* will be called with 30% of probability.

    There are another way to add functions too:
       >>> genome.evaluator += eval_func

    :param name: the slot name
    :param rand_apply: if True, just one of the functions in the slot
                       will be applied, this function is randomly picked based
                       on the weight of the function added.
    """
def __init__(self, name="Anonymous Function", rand_apply=False):
""" The creator of the FunctionSlot Class """
self.funcList = []
self.funcWeights = []
self.slotName = name
self.rand_apply = rand_apply
# -----------------------------------------------------------------
    def __typeCheck(self, func):
        """
        Used internally to check if a function passed to the
        function slot is callable. Otherwise raises a TypeError exception.

        :param func: the function object
        """
        # utils.raiseException presumably raises the given exception type
        # with the message — TODO confirm against the utils module.
        if not callable(func):
            utils.raiseException("The function must be a method or function", TypeError)
# -----------------------------------------------------------------
def __iadd__(self, func):
""" To add more functions using the += operator
.. versionadded:: 0.6
The __iadd__ method.
"""
self.__typeCheck(func)
self.funcList.append(func)
return self
# -----------------------------------------------------------------
def __getitem__(self, index):
""" Used to retrieve some slot function index """
return self.funcList[index]
# -----------------------------------------------------------------
def __setitem__(self, index, value):
""" Used to set the index slot function """
self.__typeCheck(value)
self.funcList[index] = value
# -----------------------------------------------------------------
def __iter__(self):
""" Return the function list iterator """
return iter(self.funcList)
# -----------------------------------------------------------------
def __len__(self):
""" Return the number of functions on the slot
.. versionadded:: 0.6
The *__len__* method
"""
return len(self.funcList)
# -----------------------------------------------------------------
    def setRandomApply(self, flag=True):
        """
        Sets the random function application, in this mode, the
        function will randomly choose one slot to apply

        :param flag: True or False
        """
        # NOTE(review): BooleanType comes from the Python 2 `types` module
        # (it is simply `bool`); this import does not exist on Python 3.
        if type(flag) != BooleanType:
            utils.raiseException("Random option must be True or False", TypeError)
        self.rand_apply = flag
# -----------------------------------------------------------------
    def clear(self):
        """ Used to clear the functions in the slot """
        # empty both lists in place (del of the full slice) so any external
        # references to the lists stay valid
        if len(self.funcList) > 0:
            del self.funcList[:]
            del self.funcWeights[:]
# -----------------------------------------------------------------
    def add(self, func, weight=0.5):
        """ Used to add a function to the slot

        :param func: the function to be added in the slot
        :param weight: used when you enable the *random apply*, it's the weight
                       of the function for the random selection

        .. versionadded:: 0.6
           The `weight` parameter.
        """
        self.__typeCheck(func)
        # keep funcList and funcWeights aligned: one weight per function
        self.funcList.append(func)
        self.funcWeights.append(weight)
# -----------------------------------------------------------------
def isEmpty(self):
""" Return true if the function slot is empy """
return (len(self.funcList) == 0)
# -----------------------------------------------------------------
    def set(self, func, weight=0.5):
        """ Used to clear all functions in the slot and add one

        :param func: the function to be added in the slot
        :param weight: used when you enable the *random apply*, it's the weight
                       of the function for the random selection

        .. versionadded:: 0.6
           The `weight` parameter.

        .. note:: the method *set* of the function slot remove all previous
                  functions added to the slot.
        """
        # reset the slot first, then install the single function
        self.clear()
        self.__typeCheck(func)
        self.add(func, weight)
# -----------------------------------------------------------------
    def apply(self, index, obj, **args):
        """ Apply the index function

        :param index: the index of the function
        :param obj: this object is passed as parameter to the function
        :param args: this args dictionary is passed to the function
        :rtype: whatever the selected function returns
        """
        if len(self.funcList) <= 0:
            raise Exception("No function defined: " + self.slotName)
        return self.funcList[index](obj, **args)
# -----------------------------------------------------------------
    def applyFunctions(self, obj=None, **args):
        """ Generator to apply all function slots in obj

        When *rand_apply* is set, a single function is chosen by weighted
        roulette selection and yielded once; otherwise every registered
        function is applied in insertion order.

        :param obj: this object is passed as parameter to the function
        :param args: this args dictionary is passed to the function
        """
        if len(self.funcList) <= 0:
            utils.raiseException("No function defined: " + self.slotName)
        if not self.rand_apply:
            for f in self.funcList:
                yield f(obj, **args)
        else:
            # weighted roulette: walk the weights, subtracting each band
            # from the uniform draw v until v falls inside a band
            v = prng.uniform(0, 1)
            fobj = None
            for func, weight in zip(self.funcList, self.funcWeights):
                fobj = func
                if v < weight:
                    break
                v = v - weight
            # fobj falls back to the last function if no band matched
            yield fobj(obj, **args)
# -----------------------------------------------------------------
def __repr__(self):
""" String representation of FunctionSlot """
strRet = "Slot [%s] (Count: %d)\n" % (self.slotName, len(self.funcList))
if len(self.funcList) <= 0:
strRet += "\t\tNo function\n"
return strRet
for f, w in zip(self.funcList, self.funcWeights):
strRet += "\t\tName: %s - Weight: %.2f\n" % (f.func_name, w)
if f.func_doc:
strRet += "\t\tDoc: " + f.func_doc + "\n"
return strRet
# -----------------------------------------------------------------
| [
"utils.raiseException"
] | [((2841, 2917), 'utils.raiseException', 'utils.raiseException', (['"""The function must be a method or function"""', 'TypeError'], {}), "('The function must be a method or function', TypeError)\n", (2861, 2917), False, 'import utils\n'), ((4425, 4495), 'utils.raiseException', 'utils.raiseException', (['"""Random option must be True or False"""', 'TypeError'], {}), "('Random option must be True or False', TypeError)\n", (4445, 4495), False, 'import utils\n'), ((6988, 7049), 'utils.raiseException', 'utils.raiseException', (["('No function defined: ' + self.slotName)"], {}), "('No function defined: ' + self.slotName)\n", (7008, 7049), False, 'import utils\n')] |
"""Implements the test class for the spelling corrector"""
import json
import logging
import os
import sys
import unittest
from unittest import TestCase
from spelling_corrector import NorvigCorrector, SymmetricDeleteCorrector
class SpellingCorrectorTest(TestCase):
"""Implements the test class for the spelling corrector"""
def test_norvig_corrector(self):
"""Tests the norvig corrector"""
current_working_directory = os.path.abspath(os.getcwd())
tests_directory = os.path.join(current_working_directory, "tests")
logging.info("Tests the norvig corrector")
logging.info("Tests directory is %s" % tests_directory)
for test_directory_name in os.listdir(tests_directory):
logging.info("Testing in %s directory" % test_directory_name)
test_directory_path = os.path.join(tests_directory, test_directory_name)
dictionary_path = os.path.join(test_directory_path, "dictionary.txt")
test_input_2_expected_output_path = os.path.join(test_directory_path, "input_2_expected_output.json")
word_2_frequency = {}
with open(dictionary_path, "r") as dictionary_file:
logging.info("Reading the dictionary %s" % test_directory_name)
dictionary_lines = dictionary_file.readlines()
for _, line in enumerate(dictionary_lines):
word, frequency_value = line.strip().split()
word_2_frequency[word.lower()] = int(frequency_value)
spelling_corrector = NorvigCorrector(word_2_frequency)
with open(test_input_2_expected_output_path) as input_2_expected_output_file:
logging.info("Reading the test data")
input_2_expected_output = json.load(input_2_expected_output_file)
for input_, expected_output in input_2_expected_output.items():
logging.info("Expected output for the input '%s' is '%s'" % (input_, expected_output))
self.assertEqual(expected_output, spelling_corrector.correct(input_))
def test_symmetric_delete_corrector(self):
"""Tests the symmetric delete corrector"""
current_working_directory = os.path.abspath(os.getcwd())
tests_directory = os.path.join(current_working_directory, "tests")
logging.info("Tests the symmetric delete corrector")
logging.info("Tests directory is %s" % tests_directory)
for test_directory_name in os.listdir(tests_directory):
logging.info("Testing in %s directory" % test_directory_name)
test_directory_path = os.path.join(tests_directory, test_directory_name)
dictionary_path = os.path.join(test_directory_path, "dictionary.txt")
test_input_2_expected_output_path = os.path.join(test_directory_path, "input_2_expected_output.json")
word_2_frequency = {}
with open(dictionary_path, "r") as dictionary_file:
logging.info("Reading the dictionary %s" % test_directory_name)
dictionary_lines = dictionary_file.readlines()
for _, line in enumerate(dictionary_lines):
word, frequency_value = line.strip().split()
word_2_frequency[word.lower()] = int(frequency_value)
spelling_corrector = SymmetricDeleteCorrector(word_2_frequency)
with open(test_input_2_expected_output_path) as input_2_expected_output_file:
logging.info("Reading the test data")
input_2_expected_output = json.load(input_2_expected_output_file)
for input_, expected_output in input_2_expected_output.items():
logging.info("Expected output for the input '%s' is '%s'" % (input_, expected_output))
self.assertEqual(expected_output, spelling_corrector.correct(input_))
if __name__ == '__main__':
    # stream DEBUG-level logging to stdout so test output is visible when
    # the suite is launched directly with ``python this_file.py``
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    unittest.main()
| [
"logging.basicConfig",
"os.listdir",
"os.path.join",
"os.getcwd",
"json.load",
"spelling_corrector.NorvigCorrector",
"unittest.main",
"spelling_corrector.SymmetricDeleteCorrector",
"logging.info"
] | [((3930, 3989), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (3949, 3989), False, 'import logging\n'), ((3994, 4009), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4007, 4009), False, 'import unittest\n'), ((502, 550), 'os.path.join', 'os.path.join', (['current_working_directory', '"""tests"""'], {}), "(current_working_directory, 'tests')\n", (514, 550), False, 'import os\n'), ((559, 601), 'logging.info', 'logging.info', (['"""Tests the norvig corrector"""'], {}), "('Tests the norvig corrector')\n", (571, 601), False, 'import logging\n'), ((610, 665), 'logging.info', 'logging.info', (["('Tests directory is %s' % tests_directory)"], {}), "('Tests directory is %s' % tests_directory)\n", (622, 665), False, 'import logging\n'), ((701, 728), 'os.listdir', 'os.listdir', (['tests_directory'], {}), '(tests_directory)\n', (711, 728), False, 'import os\n'), ((2285, 2333), 'os.path.join', 'os.path.join', (['current_working_directory', '"""tests"""'], {}), "(current_working_directory, 'tests')\n", (2297, 2333), False, 'import os\n'), ((2342, 2394), 'logging.info', 'logging.info', (['"""Tests the symmetric delete corrector"""'], {}), "('Tests the symmetric delete corrector')\n", (2354, 2394), False, 'import logging\n'), ((2403, 2458), 'logging.info', 'logging.info', (["('Tests directory is %s' % tests_directory)"], {}), "('Tests directory is %s' % tests_directory)\n", (2415, 2458), False, 'import logging\n'), ((2494, 2521), 'os.listdir', 'os.listdir', (['tests_directory'], {}), '(tests_directory)\n', (2504, 2521), False, 'import os\n'), ((463, 474), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (472, 474), False, 'import os\n'), ((742, 803), 'logging.info', 'logging.info', (["('Testing in %s directory' % test_directory_name)"], {}), "('Testing in %s directory' % test_directory_name)\n", (754, 803), False, 'import logging\n'), ((838, 888), 'os.path.join', 
'os.path.join', (['tests_directory', 'test_directory_name'], {}), '(tests_directory, test_directory_name)\n', (850, 888), False, 'import os\n'), ((919, 970), 'os.path.join', 'os.path.join', (['test_directory_path', '"""dictionary.txt"""'], {}), "(test_directory_path, 'dictionary.txt')\n", (931, 970), False, 'import os\n'), ((1019, 1084), 'os.path.join', 'os.path.join', (['test_directory_path', '"""input_2_expected_output.json"""'], {}), "(test_directory_path, 'input_2_expected_output.json')\n", (1031, 1084), False, 'import os\n'), ((1558, 1591), 'spelling_corrector.NorvigCorrector', 'NorvigCorrector', (['word_2_frequency'], {}), '(word_2_frequency)\n', (1573, 1591), False, 'from spelling_corrector import NorvigCorrector, SymmetricDeleteCorrector\n'), ((2246, 2257), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2255, 2257), False, 'import os\n'), ((2535, 2596), 'logging.info', 'logging.info', (["('Testing in %s directory' % test_directory_name)"], {}), "('Testing in %s directory' % test_directory_name)\n", (2547, 2596), False, 'import logging\n'), ((2631, 2681), 'os.path.join', 'os.path.join', (['tests_directory', 'test_directory_name'], {}), '(tests_directory, test_directory_name)\n', (2643, 2681), False, 'import os\n'), ((2712, 2763), 'os.path.join', 'os.path.join', (['test_directory_path', '"""dictionary.txt"""'], {}), "(test_directory_path, 'dictionary.txt')\n", (2724, 2763), False, 'import os\n'), ((2812, 2877), 'os.path.join', 'os.path.join', (['test_directory_path', '"""input_2_expected_output.json"""'], {}), "(test_directory_path, 'input_2_expected_output.json')\n", (2824, 2877), False, 'import os\n'), ((3351, 3393), 'spelling_corrector.SymmetricDeleteCorrector', 'SymmetricDeleteCorrector', (['word_2_frequency'], {}), '(word_2_frequency)\n', (3375, 3393), False, 'from spelling_corrector import NorvigCorrector, SymmetricDeleteCorrector\n'), ((1199, 1262), 'logging.info', 'logging.info', (["('Reading the dictionary %s' % test_directory_name)"], {}), "('Reading 
the dictionary %s' % test_directory_name)\n", (1211, 1262), False, 'import logging\n'), ((1698, 1735), 'logging.info', 'logging.info', (['"""Reading the test data"""'], {}), "('Reading the test data')\n", (1710, 1735), False, 'import logging\n'), ((1778, 1817), 'json.load', 'json.load', (['input_2_expected_output_file'], {}), '(input_2_expected_output_file)\n', (1787, 1817), False, 'import json\n'), ((2992, 3055), 'logging.info', 'logging.info', (["('Reading the dictionary %s' % test_directory_name)"], {}), "('Reading the dictionary %s' % test_directory_name)\n", (3004, 3055), False, 'import logging\n'), ((3500, 3537), 'logging.info', 'logging.info', (['"""Reading the test data"""'], {}), "('Reading the test data')\n", (3512, 3537), False, 'import logging\n'), ((3580, 3619), 'json.load', 'json.load', (['input_2_expected_output_file'], {}), '(input_2_expected_output_file)\n', (3589, 3619), False, 'import json\n'), ((1918, 2008), 'logging.info', 'logging.info', (['("Expected output for the input \'%s\' is \'%s\'" % (input_, expected_output))'], {}), '("Expected output for the input \'%s\' is \'%s\'" % (input_,\n expected_output))\n', (1930, 2008), False, 'import logging\n'), ((3720, 3810), 'logging.info', 'logging.info', (['("Expected output for the input \'%s\' is \'%s\'" % (input_, expected_output))'], {}), '("Expected output for the input \'%s\' is \'%s\'" % (input_,\n expected_output))\n', (3732, 3810), False, 'import logging\n')] |
from django.conf import settings
from django.conf.urls import url, patterns, include
from django.contrib import admin
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponse
from django.views.decorators.cache import cache_page
from django.views.generic.base import TemplateView, RedirectView
from .utils import GEOGRAPHIES_MAP
from .views import (HomepageView, GeographyDetailView,
TableDetailView, PlaceSearchJson, GeoSearch,
HealthcheckView, DataView, TopicView, ExampleView,
MakeJSONView, SitemapTopicsView, SearchResultsView)
# discover each installed app's admin registrations
admin.autodiscover()
STANDARD_CACHE_TIME = 60*60*24 # 24-hour cache
# regex alternation of the supported comparison view formats
COMPARISON_FORMATS = 'map|table|distribution'
# when True the generated robots.txt disallows all crawlers (settings override)
BLOCK_ROBOTS = getattr(settings, 'BLOCK_ROBOTS', False)
urlpatterns = patterns('',
    # site root
    url(
        regex = '^$',
        view = cache_page(STANDARD_CACHE_TIME)(HomepageView.as_view()),
        kwargs = {},
        name = 'homepage',
    ),
    # e.g. /profiles/16000US5367000/ (Spokane, WA)
    # this should redirect to slugged version of the URL above
    url(
        regex = '^profiles/(?P<fragment>[a-zA-Z0-9\-]+)/$',
        view = cache_page(STANDARD_CACHE_TIME)(GeographyDetailView.as_view()),
        kwargs = {},
        name = 'geography_detail',
    ),
    # bare /profiles/ falls back to the search page
    url(
        regex = '^profiles/$',
        view = RedirectView.as_view(url=reverse_lazy('search')),
        kwargs = {},
        name = 'geography_search_redirect',
    ),
    url(
        regex = '^make-json/charts/$',
        view = MakeJSONView.as_view(),
        kwargs = {},
        name = 'make_json_charts',
    ),
    # e.g. /table/B01001/
    # permanent redirects for tables that were renamed upstream
    url(
        regex = '^tables/B23002/$',
        view = RedirectView.as_view(url=reverse_lazy('table_detail',kwargs={'table':'B23002A'})),
        kwargs = {},
        name = 'redirect_B23002',
    ),
    url(
        regex = '^tables/C23002/$',
        view = RedirectView.as_view(url=reverse_lazy('table_detail',kwargs={'table':'C23002A'})),
        kwargs = {},
        name = 'redirect_C23002',
    ),
    url(
        regex = '^tables/(?P<table>[a-zA-Z0-9]+)/$',
        view = cache_page(STANDARD_CACHE_TIME)(TableDetailView.as_view()),
        kwargs = {},
        name = 'table_detail',
    ),
    url(
        regex = '^tables/$',
        view = RedirectView.as_view(url=reverse_lazy('search')),
        kwargs = {},
        name = 'table_search',
    ),
    url(
        regex = '^search/$',
        view = SearchResultsView.as_view(),
        kwargs = {},
        name = 'search'
    ),
    url(
        regex = '^data/$',
        view = RedirectView.as_view(url=reverse_lazy('table_search')),
        kwargs = {},
        name = 'table_search_redirect',
    ),
    # comparison data in one of the COMPARISON_FORMATS, e.g. /data/map/
    url(
        regex = '^data/(?P<format>%s)/$' % COMPARISON_FORMATS,
        view = cache_page(STANDARD_CACHE_TIME)(DataView.as_view()),
        kwargs = {},
        name = 'data_detail',
    ),
    url(
        regex = '^topics/$',
        view = cache_page(STANDARD_CACHE_TIME)(TopicView.as_view()),
        kwargs = {},
        name = 'topic_list',
    ),
    # legacy topic slug kept as a redirect
    url(
        regex = '^topics/race-latino/?$',
        view = RedirectView.as_view(url=reverse_lazy('topic_detail', kwargs={'topic_slug': 'race-hispanic'})),
        name = 'topic_latino_redirect',
    ),
    url(
        regex = '^topics/(?P<topic_slug>[-\w]+)/$',
        view = cache_page(STANDARD_CACHE_TIME)(TopicView.as_view()),
        kwargs = {},
        name = 'topic_detail',
    ),
    url(
        regex = '^examples/(?P<example_slug>[-\w]+)/$',
        view = cache_page(STANDARD_CACHE_TIME)(ExampleView.as_view()),
        kwargs = {},
        name = 'example_detail',
    ),
    url(
        regex = '^glossary/$',
        view = cache_page(STANDARD_CACHE_TIME)(TemplateView.as_view(template_name="glossary.html")),
        kwargs = {},
        name = 'glossary',
    ),
    url(
        regex = '^locate/$',
        view = cache_page(STANDARD_CACHE_TIME)(TemplateView.as_view(template_name="locate/locate.html")),
        kwargs = {},
        name = 'locate',
    ),
    url(
        regex = '^healthcheck$',
        view = HealthcheckView.as_view(),
        kwargs = {},
        name = 'healthcheck',
    ),
    # robots.txt is generated inline; BLOCK_ROBOTS toggles Disallow vs Allow.
    # NOTE(review): HttpResponse(mimetype=...) is pre-Django-1.7 syntax —
    # confirm the pinned Django version before upgrading (1.7+ uses content_type).
    url(
        regex = '^robots.txt$',
        view = lambda r: HttpResponse(
            "User-agent: *\n%s: /" % ('Disallow' if BLOCK_ROBOTS else 'Allow') ,
            mimetype="text/plain"
        )
    ),
    url(
        regex = '^topics/sitemap.xml$',
        view = SitemapTopicsView.as_view(),
        kwargs = {},
        name = 'sitemap_topics'
    ),
    ## LOCAL DEV VERSION OF API ##
    url(
        regex = '^place-search/json/$',
        view = PlaceSearchJson.as_view(),
        kwargs = {},
        name = 'place_search_json',
    ),
    url(
        regex = '^geo-search/$',
        view = GeoSearch.as_view(),
        kwargs = {},
        name = 'geo_search',
    ),
    ## END LOCAL DEV VERSION OF API ##
)
| [
"django.views.generic.base.TemplateView.as_view",
"django.http.HttpResponse",
"django.views.decorators.cache.cache_page",
"django.core.urlresolvers.reverse_lazy",
"django.contrib.admin.autodiscover"
] | [((575, 595), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ([], {}), '()\n', (593, 595), False, 'from django.contrib import admin\n'), ((825, 856), 'django.views.decorators.cache.cache_page', 'cache_page', (['STANDARD_CACHE_TIME'], {}), '(STANDARD_CACHE_TIME)\n', (835, 856), False, 'from django.views.decorators.cache import cache_page\n'), ((1145, 1176), 'django.views.decorators.cache.cache_page', 'cache_page', (['STANDARD_CACHE_TIME'], {}), '(STANDARD_CACHE_TIME)\n', (1155, 1176), False, 'from django.views.decorators.cache import cache_page\n'), ((2162, 2193), 'django.views.decorators.cache.cache_page', 'cache_page', (['STANDARD_CACHE_TIME'], {}), '(STANDARD_CACHE_TIME)\n', (2172, 2193), False, 'from django.views.decorators.cache import cache_page\n'), ((2896, 2927), 'django.views.decorators.cache.cache_page', 'cache_page', (['STANDARD_CACHE_TIME'], {}), '(STANDARD_CACHE_TIME)\n', (2906, 2927), False, 'from django.views.decorators.cache import cache_page\n'), ((3070, 3101), 'django.views.decorators.cache.cache_page', 'cache_page', (['STANDARD_CACHE_TIME'], {}), '(STANDARD_CACHE_TIME)\n', (3080, 3101), False, 'from django.views.decorators.cache import cache_page\n'), ((3485, 3516), 'django.views.decorators.cache.cache_page', 'cache_page', (['STANDARD_CACHE_TIME'], {}), '(STANDARD_CACHE_TIME)\n', (3495, 3516), False, 'from django.views.decorators.cache import cache_page\n'), ((3688, 3719), 'django.views.decorators.cache.cache_page', 'cache_page', (['STANDARD_CACHE_TIME'], {}), '(STANDARD_CACHE_TIME)\n', (3698, 3719), False, 'from django.views.decorators.cache import cache_page\n'), ((3870, 3901), 'django.views.decorators.cache.cache_page', 'cache_page', (['STANDARD_CACHE_TIME'], {}), '(STANDARD_CACHE_TIME)\n', (3880, 3901), False, 'from django.views.decorators.cache import cache_page\n'), ((3902, 3953), 'django.views.generic.base.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""glossary.html"""'}), 
"(template_name='glossary.html')\n", (3922, 3953), False, 'from django.views.generic.base import TemplateView, RedirectView\n'), ((4074, 4105), 'django.views.decorators.cache.cache_page', 'cache_page', (['STANDARD_CACHE_TIME'], {}), '(STANDARD_CACHE_TIME)\n', (4084, 4105), False, 'from django.views.decorators.cache import cache_page\n'), ((4106, 4162), 'django.views.generic.base.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""locate/locate.html"""'}), "(template_name='locate/locate.html')\n", (4126, 4162), False, 'from django.views.generic.base import TemplateView, RedirectView\n'), ((4441, 4551), 'django.http.HttpResponse', 'HttpResponse', (['("""User-agent: *\n%s: /""" % (\'Disallow\' if BLOCK_ROBOTS else \'Allow\'))'], {'mimetype': '"""text/plain"""'}), '("""User-agent: *\n%s: /""" % (\'Disallow\' if BLOCK_ROBOTS else\n \'Allow\'), mimetype=\'text/plain\')\n', (4453, 4551), False, 'from django.http import HttpResponse\n'), ((1362, 1384), 'django.core.urlresolvers.reverse_lazy', 'reverse_lazy', (['"""search"""'], {}), "('search')\n", (1374, 1384), False, 'from django.core.urlresolvers import reverse_lazy\n'), ((1740, 1797), 'django.core.urlresolvers.reverse_lazy', 'reverse_lazy', (['"""table_detail"""'], {'kwargs': "{'table': 'B23002A'}"}), "('table_detail', kwargs={'table': 'B23002A'})\n", (1752, 1797), False, 'from django.core.urlresolvers import reverse_lazy\n'), ((1955, 2012), 'django.core.urlresolvers.reverse_lazy', 'reverse_lazy', (['"""table_detail"""'], {'kwargs': "{'table': 'C23002A'}"}), "('table_detail', kwargs={'table': 'C23002A'})\n", (1967, 2012), False, 'from django.core.urlresolvers import reverse_lazy\n'), ((2369, 2391), 'django.core.urlresolvers.reverse_lazy', 'reverse_lazy', (['"""search"""'], {}), "('search')\n", (2381, 2391), False, 'from django.core.urlresolvers import reverse_lazy\n'), ((2674, 2702), 'django.core.urlresolvers.reverse_lazy', 'reverse_lazy', (['"""table_search"""'], {}), "('table_search')\n", (2686, 
2702), False, 'from django.core.urlresolvers import reverse_lazy\n'), ((3282, 3350), 'django.core.urlresolvers.reverse_lazy', 'reverse_lazy', (['"""topic_detail"""'], {'kwargs': "{'topic_slug': 'race-hispanic'}"}), "('topic_detail', kwargs={'topic_slug': 'race-hispanic'})\n", (3294, 3350), False, 'from django.core.urlresolvers import reverse_lazy\n')] |
from re import search
from editor import get_path
def count_spaces(line):
    """Return the end index of the first run of one to six consecutive
    spaces found in *line* — i.e. how wide the indentation run is,
    capped at six columns."""
    pattern = r' {1,6}'
    return search(pattern, line).end()
def replace():
    """Convert the space indentation of the file chosen via ``get_path()``
    into tabs, rewriting the file in place.

    The indent width is inferred from the first indented line (capped at
    six spaces by :func:`count_spaces`); every run of that many spaces is
    replaced by a single tab.  Progress and error messages follow the
    three processing steps in ``steps``.
    """
    steps={1:'Extracting content',2:'Starting edition',3:'Getting cuantity'}
    actual=1
    try:
        path = get_path()
        print(steps[1])
        # read with a context manager so the handle is always closed
        # (previously ``open(file).read()`` leaked the file object)
        with open(path) as source:
            content = source.read()
        # first line that starts with a space: the sample used to measure indent
        line = [x for x in content.split('\n') if x[:1] == ' '][0]
        actual=2
        print(steps[2])
        actual=3
        print(steps[3])
        cuantity = count_spaces(line)
        # rewrite in place; a distinct handle name avoids shadowing the path,
        # and the redundant close() inside the with-block is gone
        with open(path, 'w') as target:
            target.write(content.replace(' ' * cuantity, '\t'))
        print('Done')
    except Exception as e:
        print('++{}++'.format(e))
        print('Error in step {}'.format(actual))
        print('({})'.format(steps[actual]))
replace()
| [
"editor.get_path",
"re.search"
] | [((251, 261), 'editor.get_path', 'get_path', ([], {}), '()\n', (259, 261), False, 'from editor import get_path\n'), ((104, 124), 're.search', 'search', (['patter', 'line'], {}), '(patter, line)\n', (110, 124), False, 'from re import search\n')] |
import geocoder
from .log_helper import LogHelper
_logger = LogHelper.getLogger()
class Geocoder:
    """Thin wrapper around the ``geocoder`` library's OSM reverse lookup
    that normalizes the raw response into a small address dict."""

    def reverse_geocode(self, lat, lon):
        """Look up address information for a coordinate pair.

        Args:
            lat, lon: latitude and longitude to reverse geocode, as floats
        Returns:
            an address dict (see ``_json_to_address``) or None on failure
        """
        # TODO: Add some kind of throttling and/or caching to prevent us from sending more than 1 req/sec.
        response = geocoder.osm([lat, lon], method="reverse")
        lookup_failed = response.status_code != 200 or response.status != "OK"
        if lookup_failed:
            _logger.error(
                "Reverse geocode lookup for (%s, %s) failed with: %s",
                lat,
                lon,
                response.status,
            )
            return None
        return self._json_to_address(response.json)

    def _json_to_address(self, geo_json):
        """Convert geocoding JSON to a uniform format for our own use."""
        osm_address = geo_json.get("raw", {}).get("address")
        if osm_address is None:
            _logger.warning("Reverse geocoding result did not include raw.address")
            return None
        return {
            "country_code": osm_address.get("country_code"),
            "city": self._get_preferred_key(
                osm_address, ["city", "town", "municipality", "village"]
            ),
            "country": osm_address.get("country"),
            "state": self._get_preferred_key(
                osm_address, ["state", "region", "state_district"]
            ),
        }

    def _get_preferred_key(self, some_dict, keys):
        """Return the value for the first key of *keys* present in *some_dict*,
        or None when none of them is present."""
        for candidate in keys:
            if candidate in some_dict:
                return some_dict.get(candidate)
        return None
| [
"geocoder.osm"
] | [((509, 551), 'geocoder.osm', 'geocoder.osm', (['[lat, lon]'], {'method': '"""reverse"""'}), "([lat, lon], method='reverse')\n", (521, 551), False, 'import geocoder\n')] |
import numpy as np
import cPickle
class Distribution(object):
    """Abstract base class for (conditional) probability distributions.

    Subclasses implement :meth:`log_p`, :meth:`sample` and :meth:`predict`;
    :meth:`deriv_log_p` provides a generic finite-difference derivative
    built on top of :meth:`log_p`.
    """

    def dim(self):
        raise NotImplementedError('abstract base class')

    def predict(self, cond=None):
        raise NotImplementedError('abstract base class')

    def sample(self, cond=None, key_prefix=""):
        raise NotImplementedError('abstract base class')

    def log_p(self, x, cond=None, key_prefix=""):
        raise NotImplementedError('abstract base class')

    def deriv_log_p(self, x, idx=None, cond=None, cond_key=None, cond_idx=None, lp0=None, eps=1e-4, **kwargs):
        """
        Derivative of log P(X = x | cond = cond) with
        respect to x_idx (if idx is not None) or with
        respect to cond[cond_key]_{cond_idx} (if those
        quantities are not None).

        The default implementation computes a forward-difference numerical
        approximation to the derivative:
           df/dx ~= (f(x + eps) - f(x)) / eps

        :param lp0: optionally, a precomputed value of log_p at (x, cond)
        :param eps: finite-difference step size
        """
        # Compare against None explicitly: the previous ``lp0 if lp0 else``
        # form discarded a legitimate precomputed value of 0.0 and
        # recomputed it.
        if lp0 is None:
            lp0 = self.log_p(x=x, cond=cond, **kwargs)
        if cond_key is None:
            # we're computing df/dx
            if idx is None:
                # assume x is scalar
                deriv = ( self.log_p(x = x + eps, cond=cond, **kwargs) - lp0 ) / eps
            else:
                # perturb x[idx] in place, then restore it
                x[idx] += eps
                deriv = ( self.log_p(x = x, cond=cond, **kwargs) - lp0 ) / eps
                x[idx] -= eps
        else:
            # we're computing df/dcond[cond_key]
            if cond_idx is None:
                cond[cond_key] += eps
                deriv = ( self.log_p(x = x, cond=cond, **kwargs) - lp0 ) / eps
                cond[cond_key] -= eps
            else:
                cond[cond_key][cond_idx] += eps
                deriv = ( self.log_p(x = x, cond=cond, **kwargs) - lp0 ) / eps
                cond[cond_key][cond_idx] -= eps
        return deriv

    def dump_to_file(self, fname):
        """Pickle this distribution object to *fname*."""
        with open(fname, 'wb') as f:
            cPickle.dump(self, f, cPickle.HIGHEST_PROTOCOL)

    @staticmethod
    def load_from_file(fname):
        raise NotImplementedError('abstract base class')

    def save_to_db(self, dbconn):
        raise NotImplementedError('abstract base class')

    @staticmethod
    def load_from_db(dbconn, return_extra=False):
        raise NotImplementedError('abstract base class')
class TimeSeriesDist(Distribution):
    """Abstract base for distributions over time series: subclasses
    predict and sample *n* future steps instead of single values."""
    def predict(self, n):
        raise NotImplementedError('abstract base class')
    def sample(self, n):
        raise NotImplementedError('abstract base class')
class DummyModel(Distribution):
    """A degenerate distribution: every query scores log-probability 0,
    and both sampling and prediction yield a fixed ``default_value``."""

    def __init__(self, default_value = None, **kwargs):
        super(DummyModel, self).__init__(**kwargs)
        # fall back to 0.0 when no explicit default is supplied
        if default_value is None:
            self.default_value = 0.0
        else:
            self.default_value = default_value

    def log_p(self, x, **kwargs):
        # every outcome is "certain": log(1) == 0
        return 0.0

    def sample(self, **kwargs):
        return self.default_value

    def predict(self, **kwargs):
        return self.default_value
class Constraint(DummyModel):
    """Dummy model that restricts support to the interval [a, b]:
    log-probability is 0 inside the interval and -inf outside.
    A bound set to None means "unbounded" on that side."""

    def __init__(self, a=0, b=0, **kwargs):
        super(Constraint, self).__init__(**kwargs)
        self.a = a
        self.b = b

    def log_p(self, x, **kwargs):
        below = self.a is not None and x < self.a
        above = self.b is not None and x > self.b
        if below or above:
            return -np.inf
        return 0.0
| [
"cPickle.dump"
] | [((1903, 1950), 'cPickle.dump', 'cPickle.dump', (['self', 'f', 'cPickle.HIGHEST_PROTOCOL'], {}), '(self, f, cPickle.HIGHEST_PROTOCOL)\n', (1915, 1950), False, 'import cPickle\n')] |
from django.urls import path
from foodapp.views import *
from django.views.generic.base import TemplateView
from .views import FoodCreateView,FoodListView
from django.conf import settings
from django.conf.urls.static import static
'''
TemplateView is built-in django Class Based view Class
which is used to render the request to template,
'''
# URL routes for the food app; all current views are class-based
urlpatterns = [
    # FBV
    # path('',index),
    # path('addfood',addfood),
    # static homepage rendered straight from a template
    path('',TemplateView.as_view(template_name='foodapp/index.html'),name="Home"),
    path('addfood',FoodCreateView.as_view(),name='addfood'),
    path('foodlist',FoodListView.as_view(),name='foodmenu'),
    # <pk> captures the primary key for the update/delete/detail views
    path('foodupdate/<pk>',FoodUpdateView.as_view(),name='foodupdate'),
    path('fooddelete/<pk>',FoodDeleteView.as_view(),name='fooddelete'),
    path('fooddetail/<pk>',FoodDetailView.as_view(),name='fooddetail')
]#+static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"django.views.generic.base.TemplateView.as_view"
] | [((445, 501), 'django.views.generic.base.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""foodapp/index.html"""'}), "(template_name='foodapp/index.html')\n", (465, 501), False, 'from django.views.generic.base import TemplateView\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import sys
import re
import urllib
import json
import socket
import time
import multiprocessing
from multiprocessing.dummy import Pool
from multiprocessing import Queue
import requests
timeout = 5
# apply a 5-second default timeout to all subsequently created sockets
socket.setdefaulttimeout(timeout)
class Image(object):
    """Value object describing a single image to download."""

    def __init__(self, url, save_path, referer):
        """Record the remote URL, the local target path and the Referer
        header value to send when fetching the image."""
        super(Image, self).__init__()
        self.url, self.save_path, self.referer = url, save_path, referer
class Crawler:
# 睡眠时长
__time_sleep = 0.1
__amount = 0
__start_amount = 0
__counter = 0
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu '
'Chromium/58.0.3029.110 Chrome/58.0.3029.110 Safari/537.36'}
# 获取图片url内容等
# t 下载图片时间间隔
def __init__(self, t=0.1):
self.dirpath = dirpath
self.time_sleep = t
self.pool = Pool(30)
self.session = requests.Session()
self.session.headers = Crawler.headers
self.queue = Queue()
self.delay = 1.5 # 网络请求太频繁会被封
self.__down_counter = 1
# 获取后缀名
@staticmethod
def __get_suffix(name):
m = re.search(r'\.[^\.]*$', name)
if m.group(0) and len(m.group(0)) <= 5:
return m.group(0)
else:
return '.jpeg'
# 获取前缀
@staticmethod
def __get_prefix(name):
return name[:name.find('.')]
# 保存图片
def __resolve_img_url(self, rsp_data, referer):
imgs = []
for image_info in rsp_data['imgs']:
fix = self.__get_suffix(image_info['objURL'])
local_path = os.path.join(self.__work_path, str(self.__counter) + str(fix))
image = Image(image_info['objURL'], local_path, referer)
imgs.append(image)
print("图片+1,已有" + str(self.__down_counter) + "张")
self.__down_counter += 1
self.__counter += 1
self.queue.put(imgs)
return
# 开始获取
def __resolve_json(self, word=''):
search = urllib.quote(word)
# pn 图片数
pn = self.__start_amount
while pn < self.__amount:
url = 'http://image.baidu.com/search/avatarjson?tn=resultjsonavatarnew&ie=utf-8&word=' + search + '&cg=girl&pn=' + str(
pn) + '&rn=60&itg=0&z=0&fr=&width=&height=&lm=-1&ic=0&s=0&st=-1&gsm=1e0000001e'
# 沿用session防ban
try:
time.sleep(self.delay)
req = self.session.get(url=url, timeout=15)
rsp = req.text
except UnicodeDecodeError as e:
print(e)
print('-----UnicodeDecodeErrorurl:', url)
except requests.exceptions.RequestException as e:
print(e)
print("-----Error:", url)
except socket.timeout as e:
print(e)
print("-----socket timout:", url)
else:
# 解析json
try:
rsp_data = json.loads(rsp)
self.__resolve_img_url(rsp_data, url)
except ValueError:
pass
# 读取下一页
print("读取下一页json")
pn += 60
print("解析json完成")
return
def __downImg(self, img):
"""下载单张图片,传入的是Image对象"""
# try:
# time.sleep(self.delay)
# urllib.urlretrieve(img.url, img.save_path)
# except requests.exceptions.HTTPError as e:
# print(e)
# except Exception as err:
# time.sleep(1)
# print(err)
# print("产生未知错误,放弃保存")
imgUrl = img.url
# self.messageQueue.put("线程 %s 正在下载 %s " %
# (threading.current_thread().name, imgUrl))
try:
time.sleep(self.delay)
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu '
'Chromium/58.0.3029.110 Chrome/58.0.3029.110 Safari/537.36'}
headers['Referer'] = img.referer
res = requests.get(imgUrl, headers=headers, timeout=15)
with open(img.save_path, "wb") as f:
f.write(res.content)
except Exception as e:
message = "抛出异常: %s%s" % (imgUrl, str(e))
print(message)
    def start(self, index, word, spider_page_num=1, start_page=1):
        """
        Crawler entry point.
        :param index: sub-directory name (per person) under self.dirpath
        :param word: search keyword to crawl
        :param spider_page_num: number of result pages to fetch (60 images each)
        :param start_page: first page to fetch (1-based)
        :return: None
        """
        self.__work_path = os.path.join(self.dirpath, index)
        if not os.path.exists(self.__work_path):
            os.mkdir(self.__work_path)
        self.__counter = len(os.listdir(self.__work_path)) + 1  # continue numbering after existing files to avoid name clashes
        self.__start_amount = (start_page - 1) * 60
        self.__amount = spider_page_num * 60 + self.__start_amount
        self.__resolve_json(word)
        # Drain the queue of parsed batches and hand them to the thread pool.
        while self.queue.qsize():
            imgs = self.queue.get()
            self.pool.map_async(self.__downImg, imgs)
        self.pool.close()
        self.pool.join()
        print('完成保存')
if __name__ == '__main__':
    # Output root: ./results/<index>/ (one folder per person).
    dirpath = os.path.join(sys.path[0], 'results')
    if not os.path.exists(dirpath):
        os.mkdir(dirpath)
    # name.json maps a numeric index to a person's name (the search keyword).
    with open('name.json') as f:
        json_data = json.load(f)
    # word = str(input("请输入图片关键字: \n"))
    # Crawl people in ascending index order.
    sort_data = sorted([(int(k), v) for k, v in json_data.items()])
    print('开始')
    for index, name in sort_data:
        folder = str(index)
        person = name.encode('utf-8')
        print('开始抓取 {}:{}'.format(folder, person))
        # Skip folders that were already crawled in a previous run.
        if folder in os.listdir('./results'):
            print('已存在, continue')
            continue
        crawler = Crawler(0.05)
        crawler.dirpath = dirpath
        crawler.start(folder, person, 2, 1)
| [
"os.path.exists",
"json.loads",
"os.listdir",
"requests.Session",
"os.path.join",
"time.sleep",
"requests.get",
"urllib.quote",
"os.mkdir",
"multiprocessing.dummy.Pool",
"json.load",
"multiprocessing.Queue",
"socket.setdefaulttimeout",
"re.search"
] | [((255, 288), 'socket.setdefaulttimeout', 'socket.setdefaulttimeout', (['timeout'], {}), '(timeout)\n', (279, 288), False, 'import socket\n'), ((5278, 5314), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""results"""'], {}), "(sys.path[0], 'results')\n", (5290, 5314), False, 'import os\n'), ((964, 972), 'multiprocessing.dummy.Pool', 'Pool', (['(30)'], {}), '(30)\n', (968, 972), False, 'from multiprocessing.dummy import Pool\n'), ((996, 1014), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1012, 1014), False, 'import requests\n'), ((1083, 1090), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (1088, 1090), False, 'from multiprocessing import Queue\n'), ((1233, 1263), 're.search', 're.search', (['"""\\\\.[^\\\\.]*$"""', 'name'], {}), "('\\\\.[^\\\\.]*$', name)\n", (1242, 1263), False, 'import re\n'), ((2092, 2110), 'urllib.quote', 'urllib.quote', (['word'], {}), '(word)\n', (2104, 2110), False, 'import urllib\n'), ((4675, 4708), 'os.path.join', 'os.path.join', (['self.dirpath', 'index'], {}), '(self.dirpath, index)\n', (4687, 4708), False, 'import os\n'), ((5326, 5349), 'os.path.exists', 'os.path.exists', (['dirpath'], {}), '(dirpath)\n', (5340, 5349), False, 'import os\n'), ((5359, 5376), 'os.mkdir', 'os.mkdir', (['dirpath'], {}), '(dirpath)\n', (5367, 5376), False, 'import os\n'), ((5431, 5443), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5440, 5443), False, 'import json\n'), ((3860, 3882), 'time.sleep', 'time.sleep', (['self.delay'], {}), '(self.delay)\n', (3870, 3882), False, 'import time\n'), ((4162, 4211), 'requests.get', 'requests.get', (['imgUrl'], {'headers': 'headers', 'timeout': '(15)'}), '(imgUrl, headers=headers, timeout=15)\n', (4174, 4211), False, 'import requests\n'), ((4724, 4756), 'os.path.exists', 'os.path.exists', (['self.__work_path'], {}), '(self.__work_path)\n', (4738, 4756), False, 'import os\n'), ((4770, 4796), 'os.mkdir', 'os.mkdir', (['self.__work_path'], {}), '(self.__work_path)\n', (4778, 4796), False, 
'import os\n'), ((5740, 5763), 'os.listdir', 'os.listdir', (['"""./results"""'], {}), "('./results')\n", (5750, 5763), False, 'import os\n'), ((2485, 2507), 'time.sleep', 'time.sleep', (['self.delay'], {}), '(self.delay)\n', (2495, 2507), False, 'import time\n'), ((4826, 4854), 'os.listdir', 'os.listdir', (['self.__work_path'], {}), '(self.__work_path)\n', (4836, 4854), False, 'import os\n'), ((3065, 3080), 'json.loads', 'json.loads', (['rsp'], {}), '(rsp)\n', (3075, 3080), False, 'import json\n')] |
from setuptools import setup, find_packages
from codecs import open  # NOTE: codecs.open shadows the builtin open (accepts an encoding)
import os
# Get the long description from the README file
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, "README.md")) as f:
    long_description = f.read()
# Get the version (single-sourced from strup/__init__.py, e.g. __version__ = "1.2.3")
for line in open(os.path.join(here, "strup", "__init__.py")):
    if line.startswith("__version__"):
        version = line.split("=")[1].strip()[1:-1]  # strip the surrounding quotes
# List packages we depend on (end users)
dependencies = []
# Packages for development of strup (assumed on Python 3.x)
dependencies_dev = ["pytest>=5.1", "pytest-cov", "coverage", "black", "coveralls"]
# Packages for testing strup without syntax and coverage checks (for CI checks on old images)
dependencies_test = ["pytest>=4.6"]
# Package metadata; values above (version, long_description, dependencies*) are
# computed at import time from the repository contents.
setup(
    name="strup",
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version=version,
    description=(
        "A package for unpacking int, float, string and bool objects from a text string."
    ),
    long_description=long_description,
    long_description_content_type="text/markdown",
    # The project's main homepage.
    url="https://github.com/jeblohe/strup",
    # Author details
    author="<NAME>",
    author_email="<EMAIL>",
    # Choose your license
    license="MIT",
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        "Development Status :: 5 - Production/Stable",
        # Indicate who your project is intended for
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: End Users/Desktop",
        "Intended Audience :: Financial and Insurance Industry",
        "Intended Audience :: Information Technology",
        "Intended Audience :: Manufacturing",
        "Intended Audience :: Other Audience",
        "Intended Audience :: Science/Research",
        "Intended Audience :: System Administrators",
        "Intended Audience :: Telecommunications Industry",
        "Topic :: Text Processing",
        # Pick your license as you wish (should match "license" above)
        "License :: OSI Approved :: MIT License",
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    # Supported Python versions (pip will refuse to install on other versions)
    python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
    # What does your project relate to?
    keywords="text processing",
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(),
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed.
    install_requires=dependencies,
    # List additional groups of dependencies here (e.g. development and test
    # dependencies). Users will be able to install these using the "extras"
    # syntax, for example:
    #
    #   $ pip install strup[dev]   # or: pip install -e .[dev]
    #   $ pip install strup[test]
    #
    # Similar to `install_requires` above, these must be valid existing
    # projects.
    extras_require={
        "dev": dependencies_dev,
        "test": dependencies_test,
    },
    # If there are data files included in your packages that need to be
    # installed, specify them here.
    package_data={},
    zip_safe=True,
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={"console_scripts": []},
    # List additional URLs that are relevant to your project as a dict.
    project_urls={
        "Documentation": "https://strup.readthedocs.io/",
        "Bug Tracker": "https://github.com/jeblohe/strup/issues",
        "Source Code": "https://github.com/jeblohe/strup/",
    },
)
| [
"os.path.dirname",
"setuptools.find_packages",
"os.path.join"
] | [((151, 176), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (166, 176), False, 'import os\n'), ((295, 337), 'os.path.join', 'os.path.join', (['here', '"""strup"""', '"""__init__.py"""'], {}), "(here, 'strup', '__init__.py')\n", (307, 337), False, 'import os\n'), ((188, 219), 'os.path.join', 'os.path.join', (['here', '"""README.md"""'], {}), "(here, 'README.md')\n", (200, 219), False, 'import os\n'), ((3270, 3285), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (3283, 3285), False, 'from setuptools import setup, find_packages\n')] |
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Runs a power flow.
"""
from sys import stdout, stderr
from os.path import dirname, join
from time import time
from numpy import r_, c_, ix_, zeros, pi, ones, exp, argmax,angle
from numpy import flatnonzero as find
#from pypower.bustypes import bustypes
#from pypower.ext2int import ext2int
#from pypower.loadcase import loadcase
#from pypower.ppoption import ppoption
#from pypower.ppver import ppver
#from pypower.makeBdc import makeBdc
from pypower.makeSbus import makeSbus
#from pypower.dcpf import dcpf
#from pypower.makeYbus import makeYbus
from pypower.newtonpf_fast import newtonpf_fast
#from pypower.fdpf import fdpf
#from pypower.gausspf import gausspf
#from pypower.makeB import makeB
#from pypower.pfsoln import pfsoln
#from pypower.printpf import printpf
#from pypower.savecase import savecase
#from pypower.int2ext import int2ext
from pypower.idx_bus import PD, QD, VM, VA, GS, BUS_TYPE, PQ, REF
from pypower.idx_brch import PF, PT, QF, QT
from pypower.idx_gen import PG, QG, VG, QMAX, QMIN, GEN_BUS, GEN_STATUS
def runpf_fast(Ybus, Yf,Yt,ref, pv, pq,on,ppc, ppopt=None, fname='', solvedcase=''):
    """Runs a full AC Newton power flow on a pre-processed PYPOWER case.

    Unlike pypower's stock ``runpf``, the admittance matrices (Ybus, Yf, Yt),
    the bus-type index lists (ref, pv, pq) and the on-line generator index
    array (on) are supplied by the caller, so they can be built once and
    reused across many repeated solves.

    Note: ``ppc`` is modified in place -- bus/branch/gen indices are shifted
    to 0-based for the solve and shifted back before returning. ``fname`` and
    ``solvedcase`` are accepted for interface compatibility but unused here.

    Returns ``(ppc, success, i)``: the updated case dict, the convergence
    flag from ``newtonpf_fast``, and the iteration count.

    @author: <NAME> (PSERC Cornell)
    """
    ## default arguments
    ## options
    ## read data
    #ppc = loadcase(casedata)
    ## convert to internal (0-based) indexing; undone before returning
    ppc["branch"][:,[0,1]]-=1
    ppc["bus"][:,0]-=1
    ppc["gen"][:,0]-=1
    baseMVA, bus, gen, branch = \
        ppc["baseMVA"], ppc["bus"], ppc["gen"], ppc["branch"]
    ## get bus index lists of each type of bus
    #ref, pv, pq = bustypes(bus, gen)
    #
    # generator info
    #print(gen[:, GEN_STATUS])
    #on = find(gen[:, GEN_STATUS] > 0) ## which generators are on?
    gbus = gen[on, GEN_BUS].astype(int) ## what buses are they at?
    ##----- run the power flow -----
    t0 = time()
    ## initial voltage guess; 0.017453292519943295 == pi/180 (VA is in degrees)
    V0 = bus[:, VM] * exp(1j * 0.017453292519943295 * bus[:, VA])
    ## impose generator voltage set-points at generator buses (keep the angle)
    V0[gbus] = gen[on, VG] / abs(V0[gbus]) * V0[gbus]
    ## build admittance matrices
    #Ybus, Yf, Yt = makeYbus(baseMVA, bus, branch)
    ## compute complex bus power injections [generation - load]
    Sbus = makeSbus(baseMVA, bus, gen)
    ## run the power flow
    V, success, i = newtonpf_fast(Ybus, Sbus, V0, ref, pv, pq, ppopt)
    ## update data matrices with solution (magnitudes and angles only;
    ## branch flows / generator outputs are intentionally not recomputed)
    #bus, gen, branch = pfsoln(baseMVA, bus, gen, branch, Ybus, Yf, Yt, V, ref, pv, pq)
    bus[:, VM] = abs(V)
    bus[:, VA] = angle(V) * 180 / pi
    #UNTIL HERE
    ppc["et"] = time() - t0
    ppc["success"] = success
    ##----- output results -----
    ## convert back to original bus numbering & print results
    ppc["bus"], ppc["gen"], ppc["branch"] = bus, gen, branch
    ppc["branch"][:,[0,1]]+=1
    ppc["bus"][:,0]+=1
    ppc["gen"][:,0]+=1
    return ppc, success,i
if __name__ == '__main__':
    # NOTE(review): `runpf` is neither defined nor imported in this module
    # (only `runpf_fast` exists, and it requires arguments), so running this
    # file directly raises NameError. Likely a leftover from pypower's
    # runpf.py -- confirm intent before changing.
    runpf()
| [
"numpy.angle",
"numpy.exp",
"pypower.newtonpf_fast.newtonpf_fast",
"time.time",
"pypower.makeSbus.makeSbus"
] | [((3722, 3728), 'time.time', 'time', ([], {}), '()\n', (3726, 3728), False, 'from time import time\n'), ((4014, 4041), 'pypower.makeSbus.makeSbus', 'makeSbus', (['baseMVA', 'bus', 'gen'], {}), '(baseMVA, bus, gen)\n', (4022, 4041), False, 'from pypower.makeSbus import makeSbus\n'), ((4090, 4139), 'pypower.newtonpf_fast.newtonpf_fast', 'newtonpf_fast', (['Ybus', 'Sbus', 'V0', 'ref', 'pv', 'pq', 'ppopt'], {}), '(Ybus, Sbus, V0, ref, pv, pq, ppopt)\n', (4103, 4139), False, 'from pypower.newtonpf_fast import newtonpf_fast\n'), ((3754, 3799), 'numpy.exp', 'exp', (['(1.0j * 0.017453292519943295 * bus[:, VA])'], {}), '(1.0j * 0.017453292519943295 * bus[:, VA])\n', (3757, 3799), False, 'from numpy import r_, c_, ix_, zeros, pi, ones, exp, argmax, angle\n'), ((4365, 4371), 'time.time', 'time', ([], {}), '()\n', (4369, 4371), False, 'from time import time\n'), ((4312, 4320), 'numpy.angle', 'angle', (['V'], {}), '(V)\n', (4317, 4320), False, 'from numpy import r_, c_, ix_, zeros, pi, ones, exp, argmax, angle\n')] |
# This program imports the federal reserve economic data consumer price index
# values from 1990 and uses those values to get the real values or infaltion adjusted
# values of the sepcific commodities/markets.
# Then when a commdoity hits a specific low infaltion based price, the algo
# enters into a long psoiton and exits when the commodity/market hits a relativley
# high price.
import numpy
import csv
#elemnt zero is the oldest elment, in this case, inflation from 2/1/1990
def cpi_array(csv_path="CPI_Spyder.csv"):
    """Load the monthly CPI series from *csv_path*.

    The CSV holds one month per row with the CPI value in the second column;
    element 0 is the oldest entry (inflation from 2/1/1990).

    Improvements over the original: the local no longer shadows the function
    name, the redundant close() after the `with` block is gone, and the array
    is sized from the file instead of a hard-coded 328 rows (backward
    compatible for the shipped data file; generalizes to other series).

    :param csv_path: path to the CPI CSV file (default: the original file)
    :return: 1-D numpy array of floats, one entry per row
    """
    values = []
    with open(csv_path, 'r') as csvfile:
        for row in csv.reader(csvfile):
            values.append(float(row[1]))
    return numpy.array(values)
# Per-market configuration, keyed by market index. Each value is a list:
# [buy price, sell price, current position, initial entry position,
#  fall-by fraction, add-to-position size, number of pyramid adds so far]
def market_dictionary():
    """Return the per-market trade configuration used by myTradingSystem."""
    base_config = [
        (10000.0, 12500.0, 0.08),
        (8000.0, 12000.0, 0.12),
        (20000.0, 25000.0, 0.1),
        (15000.0, 20000.0, 0.06),
        (26000.0, 36000.0, 0.07),
        (25000.0, 30000.0, 0.08),
        (20000.0, 21000.0, 0.05),
        (14000.0, 17000.0, 0.07),
        (15000.0, 20000.0, 0.07),
        (5000.0, 6000.0, 0.1),
        (13000.0, 19500.0, 0.075),
    ]
    # Entry size (.5) and add size (.1) are the same for every market.
    return {idx: [buy, sell, 0, .5, fall, .1, 0]
            for idx, (buy, sell, fall) in enumerate(base_config)}
def myTradingSystem(DATE, OPEN, HIGH, LOW, CLOSE, VOL, exposure, equity, settings):
    """Inflation-adjusted mean-reversion system.

    Deflates the latest close of each market to base-period dollars with the
    CPI series, opens a long position when the real price drops below the
    per-market entry level, pyramids into the position on further percentage
    drops (up to 5 adds), and flattens when the real price reaches the
    per-market sell level.

    Returns (pos, settings): desired exposure per market and the mutated
    settings dict (position state lives in settings['market_dictionary']).
    """
    nMarkets = CLOSE.shape[1]
    pos = numpy.zeros(nMarkets)
    settings['countDays'] += 1
    # Deflator mapping today's nominal prices back to the base-period CPI.
    settings['CPI_muliplyer'] = settings['BASE_CPI'] / settings['cpi_array'][settings['count']]
    # Advance to the next monthly CPI value every 21 trading days.
    if settings['countDays'] % 21 == 0:
        settings['count'] += 1

    for i in range(nMarkets - 1):
        market = settings['market_dictionary'][i]
        realPrice = CLOSE[-1, i] * settings['CPI_muliplyer']

        # Enter (or re-assert) the position while the real price is cheap enough.
        if realPrice <= market[0]:
            market[2] = market[3]
            # Pyramid: after `level` adds, the next add fires when the real
            # price falls below buyPrice / (1 + fallBy * (level + 1)), capped
            # at 5 adds.
            # BUGFIX: the original elif chain was parenthesised as
            #   price <= (threshold and level == k)
            # which compared the price against a boolean, so adds almost never
            # fired; the level==1 stage also used the add-size field [5]
            # instead of the fall-by field [4].
            level = market[6]
            if level < 5 and realPrice <= market[0] / (1 + market[4] * (level + 1)):
                market[6] += 1
                market[3] += market[5]

        # Exit when the real price reaches the sell level; reset pyramid state.
        if realPrice >= market[1]:
            market[2] = 0
            market[6] = 0

    for i in range(nMarkets - 1):
        pos[i] = settings['market_dictionary'][i][2]
    # Last slot is the CASH market; weight hard-coded as in the original system.
    pos[11] = 11
    return pos, settings
def mySettings():
    """Define the trading-system settings dict consumed by the Quantiacs toolbox."""
    cpi = cpi_array()
    settings = {
        # Futures contracts traded, plus CASH.
        'markets': ['F_C', 'F_CC', 'F_CL', 'F_CT', 'F_FC', 'F_KC',
                    'F_LC', 'F_LN', 'F_NG', 'F_O', 'F_PA', 'CASH'],
        # Available data range: 19900104 - 20170710.
        'beginInSample': '19900104',
        'lookback': 21,
        'budget': 10 ** 6,
        'slippage': 0.05,
        'countDays': 0,
        'count': 0,
        'cpi_array': cpi,
        'market_dictionary': market_dictionary(),
        'BASE_CPI': cpi[0],
        'CPI_muliplyer': 0,
    }
    return settings
# Evaluate trading system defined in current file.
if __name__ == '__main__':
    # Backtest this strategy with the Quantiacs toolbox; it drives the
    # mySettings() / myTradingSystem() functions defined above.
    import quantiacsToolbox
    results = quantiacsToolbox.runts(__file__)
print(results['stats']) | [
"numpy.zeros",
"csv.reader",
"quantiacsToolbox.runts"
] | [((516, 532), 'numpy.zeros', 'numpy.zeros', (['(328)'], {}), '(328)\n', (527, 532), False, 'import numpy\n'), ((1860, 1881), 'numpy.zeros', 'numpy.zeros', (['nMarkets'], {}), '(nMarkets)\n', (1871, 1881), False, 'import numpy\n'), ((5505, 5537), 'quantiacsToolbox.runts', 'quantiacsToolbox.runts', (['__file__'], {}), '(__file__)\n', (5527, 5537), False, 'import quantiacsToolbox\n'), ((615, 634), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (625, 634), False, 'import csv\n')] |
import contextlib
import random
import string
from password_strength import PasswordStats
from redbot.core import commands
from redbot.core.utils import chat_formatting as cf
from .word_list import *
GREEN_CIRCLE = "\N{LARGE GREEN CIRCLE}"
YELLOW_CIRCLE = "\N{LARGE YELLOW CIRCLE}"
ORANGE_CIRCLE = "\N{LARGE ORANGE CIRCLE}"
RED_CIRCLE = "\N{LARGE RED CIRCLE}"
class Encryptor(commands.Cog):
    """
    Create, and validify the strength of passwords.
    """

    __author__ = ["Kreusada"]
    __version__ = "1.1.0"

    def __init__(self, bot):
        self.bot = bot

    def format_help_for_context(self, ctx: commands.Context) -> str:
        """Append author and version information to the cog help text."""
        context = super().format_help_for_context(ctx)
        authors = ", ".join(self.__author__)
        return f"{context}\n\nAuthor: {authors}\nVersion: {self.__version__}"

    async def red_delete_data_for_user(self, **kwargs):
        """Nothing to delete"""
        return

    def cog_unload(self):
        # Best-effort removal of the dev-env shortcut registered in initialize().
        with contextlib.suppress(Exception):
            self.bot.remove_dev_env_value("encryptor")

    async def initialize(self) -> None:
        # Register a dev-env shortcut only for the cog author's own bot.
        if 719988449867989142 in self.bot.owner_ids:
            with contextlib.suppress(Exception):
                self.bot.add_dev_env_value("encryptor", lambda x: self)

    @commands.group()
    async def password(self, ctx):
        """
        Create, and validify the strength of passwords.
        """
        pass

    @password.group(name="generate")
    async def password_generate(self, ctx):
        """Generate passwords."""
        pass

    @password_generate.command(name="complex")
    async def password_generate_complex(self, ctx):
        """Generate a complex password."""
        # NOTE(review): string.ascii_letters has only 52 characters, so the
        # [:94] slice is a no-op; string.printable[:94] may have been intended.
        # Left unchanged to preserve the current output alphabet.
        await ctx.send(
            "".join(
                random.choice(string.ascii_letters[:94]) for i in range(random.randint(20, 35))
            )
        )

    @password_generate.command(name="strong")
    async def password_generate_strong(self, ctx, delimeter: str = ""):
        """
        Generate a strong password.

        **Arguments**

        * ``<delimeter>``: The character used to seperate each random word. Defaults to "-"
        """
        d = delimeter
        rc = random.choice
        rr = random.randint
        await ctx.send(
            d.join(rc(RANDOM_WORDS).capitalize() for i in range(3)) + f"{d}{rr(1,1000)}"
        )

    @password.command(name="strength")
    async def password_strength(self, ctx, password: str):
        """Validate a passwords strength."""
        conv = PasswordStats(password)
        converter = conv.strength()
        # BUGFIX: the original chain used open intervals (x > a and x < b), so
        # the exact boundary scores 0.25, 0.5 and 0.75 fell through to the
        # "excellent" branch. Half-open bands cover every score exactly once.
        if converter < 0.250:
            emoji = RED_CIRCLE
            text = "This is a **weak** password."
        elif converter < 0.500:
            emoji = ORANGE_CIRCLE
            text = "This is an **okay** password."
        elif converter < 0.750:
            emoji = YELLOW_CIRCLE
            text = "This is a **good** password!"
        else:
            emoji = GREEN_CIRCLE
            text = "This is an **excellent** password!"
        await ctx.maybe_send_embed(
            f"**Strength rating: {round(converter * 100)}%** {emoji}\n{cf.quote(text)}"
        )
| [
"random.choice",
"redbot.core.utils.chat_formatting.quote",
"password_strength.PasswordStats",
"contextlib.suppress",
"redbot.core.commands.group",
"random.randint"
] | [((1274, 1290), 'redbot.core.commands.group', 'commands.group', ([], {}), '()\n', (1288, 1290), False, 'from redbot.core import commands\n'), ((2510, 2533), 'password_strength.PasswordStats', 'PasswordStats', (['password'], {}), '(password)\n', (2523, 2533), False, 'from password_strength import PasswordStats\n'), ((966, 996), 'contextlib.suppress', 'contextlib.suppress', (['Exception'], {}), '(Exception)\n', (985, 996), False, 'import contextlib\n'), ((1164, 1194), 'contextlib.suppress', 'contextlib.suppress', (['Exception'], {}), '(Exception)\n', (1183, 1194), False, 'import contextlib\n'), ((1752, 1792), 'random.choice', 'random.choice', (['string.ascii_letters[:94]'], {}), '(string.ascii_letters[:94])\n', (1765, 1792), False, 'import random\n'), ((3168, 3182), 'redbot.core.utils.chat_formatting.quote', 'cf.quote', (['text'], {}), '(text)\n', (3176, 3182), True, 'from redbot.core.utils import chat_formatting as cf\n'), ((1808, 1830), 'random.randint', 'random.randint', (['(20)', '(35)'], {}), '(20, 35)\n', (1822, 1830), False, 'import random\n')] |
import numpy as np

gama = 0.5   # discount factor
alfa = 0.75  # learning rate

# Each row is one observed transition (s, s', R) with 1-based state labels.
data = np.array([[1, 1, 1], [1, 2, -1], [2, 1, 1]])

# Q[k] holds the value estimates after processing the k-th transition;
# row 0 is the all-zero initialisation.
Q = np.zeros((data.shape[0] + 1, 2))

for step, transition in enumerate(data, start=1):
    s, sp, r = transition
    idx_s, idx_sp = s - 1, sp - 1
    # Temporal-difference update using the previous estimate of state s'.
    Q[step, idx_s] = (1 - alfa) * Q[step - 1, idx_s] + alfa * (r + gama * Q[step - 1, idx_sp])
print(Q) | [
"numpy.array",
"numpy.zeros"
] | [((50, 94), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 2, -1], [2, 1, 1]]'], {}), '([[1, 1, 1], [1, 2, -1], [2, 1, 1]])\n', (58, 94), True, 'import numpy as np\n'), ((111, 143), 'numpy.zeros', 'np.zeros', (['(data.shape[0] + 1, 2)'], {}), '((data.shape[0] + 1, 2))\n', (119, 143), True, 'import numpy as np\n')] |
import os
import numpy as np
import warnings
warnings.filterwarnings('ignore')
import torch
import torch.nn as nn
from torchvision.utils import save_image
from utils import get_lr_scheduler, sample_images, inference
# Reproducibility #
# Deterministic cuDNN kernels and no autotune benchmarking, so runs repeat.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Device Configuration #
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def train_srcnns(train_loader, val_loader, model, device, args):
    """Train a super-resolution CNN with an L1 loss and Adam.

    For each epoch: runs one pass over *train_loader*, periodically prints the
    running loss, saves sample images from *val_loader*, steps the LR
    scheduler, and every ``args.save_every`` epochs checkpoints the weights.

    NOTE(review): `losses` is never reset, so the printed value is the mean
    loss over the entire training run so far, not over the current window.
    """
    # Loss Function #
    criterion = nn.L1Loss()
    # Optimizers #
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.5, 0.999))
    optimizer_scheduler = get_lr_scheduler(optimizer=optimizer, args=args)
    # Lists #
    losses = list()
    # Train #
    print("Training {} started with total epoch of {}.".format(str(args.model).upper(), args.num_epochs))
    for epoch in range(args.num_epochs):
        for i, (high, low) in enumerate(train_loader):
            # Data Preparation #
            high = high.to(device)
            low = low.to(device)
            # Forward Data #
            generated = model(low)
            # Calculate Loss #
            loss = criterion(generated, high)
            # Initialize Optimizer #
            optimizer.zero_grad()
            # Back Propagation and Update #
            loss.backward()
            optimizer.step()
            # Add items to Lists #
            losses.append(loss.item())
            # Print Statistics #
            if (i+1) % args.print_every == 0:
                print("{} | Epoch [{}/{}] | Iterations [{}/{}] | Loss {:.4f}"
                      .format(str(args.model).upper(), epoch+1, args.num_epochs, i+1, len(train_loader), np.average(losses)))
        # Save Sample Images #
        sample_images(val_loader, args.batch_size, args.upscale_factor, model, epoch, args.samples_path, device)
        # Adjust Learning Rate #
        optimizer_scheduler.step()
        # Save Model Weights and Inference #
        if (epoch+1) % args.save_every == 0:
            torch.save(model.state_dict(), os.path.join(args.weights_path, '{}_Epoch_{}.pkl'.format(model.__class__.__name__, epoch+1)))
inference(val_loader, model, args.upscale_factor, epoch, args.inference_path, device) | [
"numpy.average",
"utils.inference",
"torch.nn.L1Loss",
"utils.get_lr_scheduler",
"torch.cuda.is_available",
"utils.sample_images",
"warnings.filterwarnings"
] | [((45, 78), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (68, 78), False, 'import warnings\n'), ((365, 390), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (388, 390), False, 'import torch\n'), ((508, 519), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (517, 519), True, 'import torch.nn as nn\n'), ((651, 699), 'utils.get_lr_scheduler', 'get_lr_scheduler', ([], {'optimizer': 'optimizer', 'args': 'args'}), '(optimizer=optimizer, args=args)\n', (667, 699), False, 'from utils import get_lr_scheduler, sample_images, inference\n'), ((2201, 2291), 'utils.inference', 'inference', (['val_loader', 'model', 'args.upscale_factor', 'epoch', 'args.inference_path', 'device'], {}), '(val_loader, model, args.upscale_factor, epoch, args.\n inference_path, device)\n', (2210, 2291), False, 'from utils import get_lr_scheduler, sample_images, inference\n'), ((1787, 1895), 'utils.sample_images', 'sample_images', (['val_loader', 'args.batch_size', 'args.upscale_factor', 'model', 'epoch', 'args.samples_path', 'device'], {}), '(val_loader, args.batch_size, args.upscale_factor, model,\n epoch, args.samples_path, device)\n', (1800, 1895), False, 'from utils import get_lr_scheduler, sample_images, inference\n'), ((1710, 1728), 'numpy.average', 'np.average', (['losses'], {}), '(losses)\n', (1720, 1728), True, 'import numpy as np\n')] |
##############################
## MFP_K1000.py ##
## <NAME> ##
## Version 2020.03.25 ##
##############################
import os
import os.path as osp
import time
import subprocess as spc
import numpy as np
import scipy as sp
import astropy.io.fits as fits
import healpy as hp
import treecorr as tree
import commonFunctions as cf
import HEALPixFunctions as hpf
################################################################################
## Parameters
class Parameters:
    """Static survey constants: data paths, footprint areas & galaxy densities."""

    ## Paths
    KiDSPath = 'data/KiDS/'
    dataPath = 'data/mockFootprint/'
    absDataPath = '/disk05/calin/91_Data/mockFootprint/'

    ## Mask parameters: footprint areas [deg^2]
    area_BOSS = 9329
    area_BOSS_reduced = 1274.319868  ## From my own calculations
    area_BOSS_wcs = 408.321
    area_BOSS_4Band = 339.298
    area_BOSS_9Band = 319.506
    area_2dFLenS_SGP = 510.803964
    area_2dFLenS_wcs = 424.508017
    area_2dFLenS_gri = 355.283139
    area_2dFLenS_9Band = 341.888289
    area_KiDS = 773.286
    area_KiDS_North = 334.138
    area_KiDS_South = 439.148
    area_KiDS_North_new = 371.801
    area_KiDS_South_new = 401.485

    ## Galaxy number density (per-area units as used upstream -- confirm)
    n_gal_BOSS_reduced_z0 = 0.014496
    n_gal_BOSS_reduced_z1 = 0.016595
    n_gal_BOSS_wcs_z0 = 0.014437
    n_gal_BOSS_wcs_z1 = 0.016265
    n_gal_2dFLenS_SGP_z0 = 0.005813
    n_gal_2dFLenS_SGP_z1 = 0.006067
    n_gal_2dFLenS_wcs_z0 = 0.005857
    n_gal_2dFLenS_wcs_z1 = 0.006031
    n_gal_2dFLenS_gri_z0 = 0.002891
    n_gal_2dFLenS_gri_z1 = 0.003677
################################################################################
## Functions related to masks - I
## Load a lens (random) catalogue & filter it with a KiDS bit mask.
def loadFitsLenCat(surveyTag, zInd, bitMaskTag='reduced'):
    """Return catalogue rows whose KIDSMASK has none of the selected bits set."""
    P = Parameters()

    ## KIDSMASK bits to reject for each mask option.
    bitMaskOptions = {
        'all': 0,         ## no selection
        'reduced': 0,     ## no selection
        'SGP': 0,         ## no selection
        'wcs': 0x4000,    ## KiDS wcs
        'gri': 0x6FFC,    ## KiDS gri overlap
        '9Band': 0x681C,  ## KiDS 9-band overlap
    }
    if bitMaskTag not in bitMaskOptions:
        raise ValueError('Bad bit mask option: \"%s\"' % bitMaskTag)
    bitMask = bitMaskOptions[bitMaskTag]

    name = '%sKiDS-1000_GGLCATS/%s_z%d.fits' % (P.KiDSPath, surveyTag, zInd+1)
    data = fits.getdata(name, 1)
    print('Loaded \"%s\"' % name)

    flag = data.field('KIDSMASK')
    keep = np.logical_not(np.array(flag.astype(int) & bitMask, dtype=bool))
    return data[keep]
## Pour the BOSS random catalogues onto a HEALPix count map & save it.
def saveFitsCountMap_BOSS(nside, bitMaskTag='wcs'):
    """Accumulate both BOSS redshift bins into one count map at *nside*."""
    P = Parameters()
    countMap = np.zeros(12 * nside * nside, dtype=int)

    ## Fill catalogues (both redshift bins).
    for zInd in range(2):
        cat = loadFitsLenCat('BOSS_random', zInd, bitMaskTag=bitMaskTag)
        pix = hpf.RADECToPatch(nside, cat.field('ALPHA_J2000'), cat.field('DELTA_J2000'))
        for p in pix:
            countMap[p] += 1

    ## Save
    name = '%sKiDS-1000_for_mocks/countMap_BOSS_%s_nside%d.fits' % (P.KiDSPath, bitMaskTag, nside)
    hpf.saveFitsFullMap(name, countMap, verbose=True)
    return
def saveFitsCountMap_overlap(surveyTag_K, surveyTag_L, nside_L):
    """Restrict a lens count map to the KiDS footprint & save the overlap map."""
    P = Parameters()
    nside_K = 4096  ## resolution of the KiDS mask

    ## Upgrade the lens count map to the KiDS resolution.
    name = '%sKiDS-1000_for_mocks/countMap_%s_nside%d.fits' % (P.KiDSPath, surveyTag_L, nside_L)
    countMap = hpf.increaseResolution(hpf.loadFitsFullMap(name), nside_K)

    ## Zero every pixel outside the KiDS area mask.
    name = '%sKiDS-1000_for_mocks/mask_%s_fromArea_nside%d.fits' % (P.KiDSPath, surveyTag_K, nside_K)
    inKiDS = hpf.loadFitsFullMap(name).astype(bool)
    countMap[~inKiDS] = 0
    del inKiDS

    ## Save
    if 'BOSS' in surveyTag_L:
        surveyTag_o = 'BOSS_KiDS_overlap'
    else:
        surveyTag_o = '2dFLenS_KiDS_overlap'
    name = '%sKiDS-1000_for_mocks/countMap_%s_nside%d.fits' % (P.KiDSPath, surveyTag_o, nside_K)
    hpf.saveFitsFullMap(name, countMap)
    del countMap
    return
## 'BOSS_wcs' is called
def saveFitsMask_fromCountMap(surveyTag):
    """Binarise a count map into a mask & save it; 2048 maps are upgraded to 4096."""
    P = Parameters()

    ## Native resolution of each survey's count map.
    nsideOptions = {
        'BOSS_reduced': 2048,
        'BOSS_wcs': 2048,
        '2dFLenS_SGP': 4096,
        '2dFLenS_wcs': 4096,
    }
    if surveyTag not in nsideOptions:
        raise NotImplementedError('surveyTag = \"%s\" not implemented' % surveyTag)
    nside = nsideOptions[surveyTag]

    name = '%sKiDS-1000_for_mocks/countMap_%s_nside%d.fits' % (P.KiDSPath, surveyTag, nside)
    mask = hpf.loadFitsFullMap(name)
    mask = np.fmin(mask, 1)  ## clip counts to {0, 1}

    if nside == 2048:
        nside2 = 4096
        mask = hpf.increaseResolution(mask, nside2)
        name = '%sKiDS-1000_for_mocks/mask_%s_fromCountMap2048_nside%d.fits' % (P.KiDSPath, surveyTag, nside2)
        hpf.saveFitsFullMap(name, mask)
        return

    ## Save
    name = '%sKiDS-1000_for_mocks/mask_%s_fromCountMap_nside%d.fits' % (P.KiDSPath, surveyTag, nside)
    hpf.saveFitsFullMap(name, mask)
    return
# This function combines the 2dFLenS mask and BOSS mask into one
def saveFitsLensMask():
    """Combine the BOSS and 2dFLenS footprint masks into one binary lens mask."""
    params = Parameters()
    bossPath = '%sKiDS-1000_for_mocks/mask_BOSS_wcs_fromCountMap2048_nside4096.fits' % params.KiDSPath
    twodfPath = '%sKiDS-1000_for_mocks/mask_2dFLenS_wcs_fromCountMap_nside4096.fits' % params.KiDSPath
    ## Sum the two masks, then clip to 0/1 (overlapping pixels would be 2)
    combined = hpf.loadFitsFullMap(bossPath) + hpf.loadFitsFullMap(twodfPath)
    combined = np.fmin(combined, 1)
    outPath = '%sKiDS-1000_for_mocks/mask_BOSS_2dFLenS_wcs_nside4096.fits' % params.KiDSPath
    hpf.saveFitsFullMap(outPath, combined)
    return
## Then I called the following & used the output of the 2nd line
## saveFitsCountMap_BOSS(2048, 'wcs') ## Need external
## saveFitsMask_fromCountMap('BOSS_wcs')
###############################################################################
| [
"HEALPixFunctions.increaseResolution",
"HEALPixFunctions.loadFitsFullMap",
"HEALPixFunctions.saveFitsFullMap",
"astropy.io.fits.getdata",
"numpy.zeros",
"HEALPixFunctions.RADECToPatch",
"numpy.fmin"
] | [((2281, 2302), 'astropy.io.fits.getdata', 'fits.getdata', (['name', '(1)'], {}), '(name, 1)\n', (2293, 2302), True, 'import astropy.io.fits as fits\n'), ((2653, 2679), 'numpy.zeros', 'np.zeros', (['nbPix'], {'dtype': 'int'}), '(nbPix, dtype=int)\n', (2661, 2679), True, 'import numpy as np\n'), ((3065, 3110), 'HEALPixFunctions.saveFitsFullMap', 'hpf.saveFitsFullMap', (['name', 'full'], {'verbose': '(True)'}), '(name, full, verbose=True)\n', (3084, 3110), True, 'import HEALPixFunctions as hpf\n'), ((3332, 3357), 'HEALPixFunctions.loadFitsFullMap', 'hpf.loadFitsFullMap', (['name'], {}), '(name)\n', (3351, 3357), True, 'import HEALPixFunctions as hpf\n'), ((3370, 3410), 'HEALPixFunctions.increaseResolution', 'hpf.increaseResolution', (['count_L', 'nside_K'], {}), '(count_L, nside_K)\n', (3392, 3410), True, 'import HEALPixFunctions as hpf\n'), ((3525, 3550), 'HEALPixFunctions.loadFitsFullMap', 'hpf.loadFitsFullMap', (['name'], {}), '(name)\n', (3544, 3550), True, 'import HEALPixFunctions as hpf\n'), ((3827, 3861), 'HEALPixFunctions.saveFitsFullMap', 'hpf.saveFitsFullMap', (['name', 'count_L'], {}), '(name, count_L)\n', (3846, 3861), True, 'import HEALPixFunctions as hpf\n'), ((4366, 4391), 'HEALPixFunctions.loadFitsFullMap', 'hpf.loadFitsFullMap', (['name'], {}), '(name)\n', (4385, 4391), True, 'import HEALPixFunctions as hpf\n'), ((4401, 4417), 'numpy.fmin', 'np.fmin', (['mask', '(1)'], {}), '(mask, 1)\n', (4408, 4417), True, 'import numpy as np\n'), ((4776, 4807), 'HEALPixFunctions.saveFitsFullMap', 'hpf.saveFitsFullMap', (['name', 'mask'], {}), '(name, mask)\n', (4795, 4807), True, 'import HEALPixFunctions as hpf\n'), ((5037, 5062), 'HEALPixFunctions.loadFitsFullMap', 'hpf.loadFitsFullMap', (['name'], {}), '(name)\n', (5056, 5062), True, 'import HEALPixFunctions as hpf\n'), ((5171, 5196), 'HEALPixFunctions.loadFitsFullMap', 'hpf.loadFitsFullMap', (['name'], {}), '(name)\n', (5190, 5196), True, 'import HEALPixFunctions as hpf\n'), ((5264, 5282), 'numpy.fmin', 
'np.fmin', (['mask_L', '(1)'], {}), '(mask_L, 1)\n', (5271, 5282), True, 'import numpy as np\n'), ((5407, 5440), 'HEALPixFunctions.saveFitsFullMap', 'hpf.saveFitsFullMap', (['name', 'mask_L'], {}), '(name, mask_L)\n', (5426, 5440), True, 'import HEALPixFunctions as hpf\n'), ((2883, 2915), 'HEALPixFunctions.RADECToPatch', 'hpf.RADECToPatch', (['nside', 'RA', 'DEC'], {}), '(nside, RA, DEC)\n', (2899, 2915), True, 'import HEALPixFunctions as hpf\n'), ((4470, 4506), 'HEALPixFunctions.increaseResolution', 'hpf.increaseResolution', (['mask', 'nside2'], {}), '(mask, nside2)\n', (4492, 4506), True, 'import HEALPixFunctions as hpf\n'), ((4618, 4649), 'HEALPixFunctions.saveFitsFullMap', 'hpf.saveFitsFullMap', (['name', 'mask'], {}), '(name, mask)\n', (4637, 4649), True, 'import HEALPixFunctions as hpf\n')] |
from django.contrib.auth.models import User
from mecc.apps.years.models import UniversityYear
class UsefullDisplay(object):
    """Old-style Django middleware attaching display data to each request.

    NOTE(review): the class name is misspelled ("Usefull"); renaming would
    break MIDDLEWARE settings references, so it is only flagged here.
    """
    def process_request(self, request):
        """Set ``request.display`` with the real user and a current-year label."""
        # Always expose the *real* user (e.g. so the displayed last/first name
        # is the genuine account even while another user is being spoofed).
        if request.session.get('is_spoofed_user'):
            u = User.objects.get(username=request.session['real_username'])
        else:
            u = request.user
        request.display = {'user': u}
        # Provide the current (target) university year as "YYYY/YYYY+1".
        y = UniversityYear.objects.filter(is_target_year=True).first()
        # NOTE(review): the backslash continuation below is *inside* the string
        # literal, so the fallback label embeds the next line's leading spaces
        # (": aucune annee selectionnee" with extra indentation) — confirm intended.
        c = "%s/%s" % (y.code_year, y.code_year + 1) if y is not None else ":\
        aucune année selectionnée"
        request.display.update({'current_year': c})
    def process_response(self, request, response):
        """Pass the response through unchanged (required middleware hook)."""
        return response
| [
"django.contrib.auth.models.User.objects.get",
"mecc.apps.years.models.UniversityYear.objects.filter"
] | [((307, 366), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': "request.session['real_username']"}), "(username=request.session['real_username'])\n", (323, 366), False, 'from django.contrib.auth.models import User\n'), ((489, 539), 'mecc.apps.years.models.UniversityYear.objects.filter', 'UniversityYear.objects.filter', ([], {'is_target_year': '(True)'}), '(is_target_year=True)\n', (518, 539), False, 'from mecc.apps.years.models import UniversityYear\n')] |
#!/usr/bin/env python
# Copyright (c) 2019, Solitude Developers
#
# This source code is licensed under the BSD-3-Clause license found in the
# COPYING file in the root directory of this source tree
from typing import List, Tuple # noqa
import sys
import os
import argparse
import datetime
import binascii
import json
from solitude.common import (update_global_config, read_yaml_or_json, read_config_file)
from solitude.common.errors import CLIError
def _update_global_config_from_file(path):
    """Read a YAML/JSON configuration file and merge it into the global config."""
    update_global_config(read_yaml_or_json(path))
def txhash_type(txhash):
    """argparse type: parse a '0x'-prefixed hex string into raw bytes.

    Raises CLIError when the prefix is missing or the hex digits are
    invalid (binascii.Error is a ValueError subclass, so both paths are
    funnelled through the same handler).
    """
    try:
        if not txhash.startswith("0x"):
            raise ValueError()
        raw = binascii.unhexlify(txhash[2:])
    except ValueError:
        raise CLIError("TXHASH format must be a hex string prefixed with 0x")
    return raw
def create_parser():
    """Build the top-level ``solitude`` argument parser.

    Each subcommand stores a ``module`` factory in the parsed namespace;
    calling ``args.module()`` imports and returns the implementation module
    lazily, so one command's dependencies do not slow down the others.
    """
    parser = argparse.ArgumentParser()
    # Global options shared by every subcommand.
    parser.add_argument(
        "-g", "--global-config", dest="global_config", type=str,
        default="resource://global_config.json",
        help="Global configuration file")
    parser.add_argument(
        "-c", "--config", type=str,
        default="./solitude.yaml",
        help="Project configuration file")
    sub = parser.add_subparsers()
    # create subparsers (declaration order fixes their order in --help)
    p_init = sub.add_parser("init")
    p_install = sub.add_parser("install")
    p_compile = sub.add_parser("compile")
    p_debug = sub.add_parser("debug")
    p_trace = sub.add_parser("trace")
    p_lint = sub.add_parser("lint")
    p_server = sub.add_parser("server")
    def module_init():
        from solitude._commandline import cmd_init
        return cmd_init
    p_init.set_defaults(module=module_init)
    def module_install():
        from solitude._commandline import cmd_install
        return cmd_install
    p_install.set_defaults(module=module_install)
    def module_compile():
        from solitude._commandline import cmd_compile
        return cmd_compile
    p_compile.set_defaults(module=module_compile)
    def module_debug():
        from solitude._commandline import cmd_debug
        return cmd_debug
    p_debug.set_defaults(module=module_debug)
    p_debug.add_argument(
        "txhash", type=txhash_type,
        help="Transaction hash, a hex string prefixed with 0x")
    p_debug.add_argument(
        "--eval-command", "-ex", action="append", help="Execute command at start", dest="ex")
    def module_trace():
        from solitude._commandline import cmd_trace
        return cmd_trace
    p_trace.set_defaults(module=module_trace)
    # Flags selecting which trace sections to print (all off by default).
    p_trace.add_argument("txhash", type=txhash_type)
    p_trace.add_argument("--variables", action="store_true")
    p_trace.add_argument("--frames", action="store_true")
    p_trace.add_argument("--stack", action="store_true")
    p_trace.add_argument("--memory", action="store_true")
    p_trace.add_argument("--storage", action="store_true")
    def module_lint():
        from solitude._commandline import cmd_lint
        return cmd_lint
    p_lint.set_defaults(module=module_lint)
    p_lint.add_argument(
        "--report",
        help="Path to report (enable report output mode)")
    p_lint.add_argument(
        "--report-template", dest="report_template",
        help="Path to report template",
        default="resource://report.filemessage.default.html")
    def module_server():
        from solitude._commandline import cmd_server
        return cmd_server
    p_server.set_defaults(module=module_server)
    p_server.add_argument(
        "--port", type=int, default=0, help="Override server port")
    return parser
def main():
    """CLI entry point: parse arguments, load config, dispatch the subcommand.

    Returns a process exit status (0 on success, 1 on error or when no
    subcommand was given).
    """
    parser = create_parser()
    args = parser.parse_args()
    # No subcommand selected: argparse leaves ``module`` unset.
    if not hasattr(args, "module"):
        parser.print_help()
        return 1
    try:
        _update_global_config_from_file(args.global_config)
        args.module().main(args)
    except CLIError as exc:
        print("Error: %s" % str(exc), file=sys.stderr)
        return 1
    return 0
# Propagate main()'s return value as the process exit status.
if __name__ == "__main__":
    sys.exit(main())
| [
"solitude.common.errors.CLIError",
"argparse.ArgumentParser",
"solitude.common.read_yaml_or_json",
"solitude.common.update_global_config",
"binascii.unhexlify"
] | [((518, 541), 'solitude.common.read_yaml_or_json', 'read_yaml_or_json', (['path'], {}), '(path)\n', (535, 541), False, 'from solitude.common import update_global_config, read_yaml_or_json, read_config_file\n'), ((546, 581), 'solitude.common.update_global_config', 'update_global_config', (['cfg_from_file'], {}), '(cfg_from_file)\n', (566, 581), False, 'from solitude.common import update_global_config, read_yaml_or_json, read_config_file\n'), ((872, 897), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (895, 897), False, 'import argparse\n'), ((704, 734), 'binascii.unhexlify', 'binascii.unhexlify', (['txhash[2:]'], {}), '(txhash[2:])\n', (722, 734), False, 'import binascii\n'), ((772, 835), 'solitude.common.errors.CLIError', 'CLIError', (['"""TXHASH format must be a hex string prefixed with 0x"""'], {}), "('TXHASH format must be a hex string prefixed with 0x')\n", (780, 835), False, 'from solitude.common.errors import CLIError\n')] |
from pypy.rpython.lltypesystem import rffi, lltype
from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void
from pypy.module.cpyext.api import (cpython_struct, Py_ssize_t, Py_ssize_tP,
PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP,
Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE)
from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref
from pypy.module.cpyext.modsupport import PyMethodDef
P, FT, PyO = Ptr, FuncType, PyObject
PyOPtr = Ptr(lltype.Array(PyO, hints={'nolength': True}))
freefunc = P(FT([rffi.VOIDP], Void))
destructor = P(FT([PyO], Void))
printfunc = P(FT([PyO, FILEP, rffi.INT_real], rffi.INT))
getattrfunc = P(FT([PyO, rffi.CCHARP], PyO))
getattrofunc = P(FT([PyO, PyO], PyO))
setattrfunc = P(FT([PyO, rffi.CCHARP, PyO], rffi.INT_real))
setattrofunc = P(FT([PyO, PyO, PyO], rffi.INT_real))
cmpfunc = P(FT([PyO, PyO], rffi.INT_real))
reprfunc = P(FT([PyO], PyO))
hashfunc = P(FT([PyO], lltype.Signed))
richcmpfunc = P(FT([PyO, PyO, rffi.INT_real], PyO))
getiterfunc = P(FT([PyO], PyO))
iternextfunc = P(FT([PyO], PyO))
descrgetfunc = P(FT([PyO, PyO, PyO], PyO))
descrsetfunc = P(FT([PyO, PyO, PyO], rffi.INT_real))
initproc = P(FT([PyO, PyO, PyO], rffi.INT_real))
newfunc = P(FT([PyTypeObjectPtr, PyO, PyO], PyO))
allocfunc = P(FT([PyTypeObjectPtr, Py_ssize_t], PyO))
unaryfunc = P(FT([PyO], PyO))
binaryfunc = P(FT([PyO, PyO], PyO))
ternaryfunc = P(FT([PyO, PyO, PyO], PyO))
inquiry = P(FT([PyO], rffi.INT_real))
lenfunc = P(FT([PyO], Py_ssize_t))
coercion = P(FT([PyOPtr, PyOPtr], rffi.INT_real))
intargfunc = P(FT([PyO, rffi.INT_real], PyO))
intintargfunc = P(FT([PyO, rffi.INT_real, rffi.INT], PyO))
ssizeargfunc = P(FT([PyO, Py_ssize_t], PyO))
ssizessizeargfunc = P(FT([PyO, Py_ssize_t, Py_ssize_t], PyO))
intobjargproc = P(FT([PyO, rffi.INT_real, PyO], rffi.INT))
intintobjargproc = P(FT([PyO, rffi.INT_real, rffi.INT, PyO], rffi.INT))
ssizeobjargproc = P(FT([PyO, Py_ssize_t, PyO], rffi.INT_real))
ssizessizeobjargproc = P(FT([PyO, Py_ssize_t, Py_ssize_t, PyO], rffi.INT_real))
objobjargproc = P(FT([PyO, PyO, PyO], rffi.INT_real))
objobjproc = P(FT([PyO, PyO], rffi.INT_real))
visitproc = P(FT([PyO, rffi.VOIDP], rffi.INT_real))
traverseproc = P(FT([PyO, visitproc, rffi.VOIDP], rffi.INT_real))
getter = P(FT([PyO, rffi.VOIDP], PyO))
setter = P(FT([PyO, PyO, rffi.VOIDP], rffi.INT_real))
wrapperfunc = P(FT([PyO, PyO, rffi.VOIDP], PyO))
wrapperfunc_kwds = P(FT([PyO, PyO, rffi.VOIDP, PyO], PyO))
readbufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t))
writebufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t))
segcountproc = P(FT([PyO, Py_ssize_tP], Py_ssize_t))
charbufferproc = P(FT([PyO, Py_ssize_t, rffi.CCHARPP], Py_ssize_t))
## We don't support new buffer interface for now
getbufferproc = rffi.VOIDP
releasebufferproc = rffi.VOIDP
# Mirror of CPython's ``PyGetSetDef``: one entry of a type's getset table,
# i.e. a property-like attribute backed by C-level get/set callbacks with an
# opaque ``closure`` pointer passed back to them.
PyGetSetDef = cpython_struct("PyGetSetDef", (
    ("name", rffi.CCHARP),
    ("get", getter),
    ("set", setter),
    ("doc", rffi.CCHARP),
    ("closure", rffi.VOIDP),
))
PyNumberMethods = cpython_struct("PyNumberMethods", (
("nb_add", binaryfunc),
("nb_subtract", binaryfunc),
("nb_multiply", binaryfunc),
("nb_divide", binaryfunc),
("nb_remainder", binaryfunc),
("nb_divmod", binaryfunc),
("nb_power", ternaryfunc),
("nb_negative", unaryfunc),
("nb_positive", unaryfunc),
("nb_absolute", unaryfunc),
("nb_nonzero", inquiry),
("nb_invert", unaryfunc),
("nb_lshift", binaryfunc),
("nb_rshift", binaryfunc),
("nb_and", binaryfunc),
("nb_xor", binaryfunc),
("nb_or", binaryfunc),
("nb_coerce", coercion),
("nb_int", unaryfunc),
("nb_long", unaryfunc),
("nb_float", unaryfunc),
("nb_oct", unaryfunc),
("nb_hex", unaryfunc),
("nb_inplace_add", binaryfunc),
("nb_inplace_subtract", binaryfunc),
("nb_inplace_multiply", binaryfunc),
("nb_inplace_divide", binaryfunc),
("nb_inplace_remainder", binaryfunc),
("nb_inplace_power", ternaryfunc),
("nb_inplace_lshift", binaryfunc),
("nb_inplace_rshift", binaryfunc),
("nb_inplace_and", binaryfunc),
("nb_inplace_xor", binaryfunc),
("nb_inplace_or", binaryfunc),
("nb_floor_divide", binaryfunc),
("nb_true_divide", binaryfunc),
("nb_inplace_floor_divide", binaryfunc),
("nb_inplace_true_divide", binaryfunc),
("nb_index", unaryfunc),
))
PySequenceMethods = cpython_struct("PySequenceMethods", (
("sq_length", lenfunc),
("sq_concat", binaryfunc),
("sq_repeat", ssizeargfunc),
("sq_item", ssizeargfunc),
("sq_slice", ssizessizeargfunc),
("sq_ass_item", ssizeobjargproc),
("sq_ass_slice", ssizessizeobjargproc),
("sq_contains", objobjproc),
("sq_inplace_concat", binaryfunc),
("sq_inplace_repeat", ssizeargfunc),
))
PyMappingMethods = cpython_struct("PyMappingMethods", (
("mp_length", lenfunc),
("mp_subscript", binaryfunc),
("mp_ass_subscript", objobjargproc),
))
PyBufferProcs = cpython_struct("PyBufferProcs", (
("bf_getreadbuffer", readbufferproc),
("bf_getwritebuffer", writebufferproc),
("bf_getsegcount", segcountproc),
("bf_getcharbuffer", charbufferproc),
("bf_getbuffer", getbufferproc),
("bf_releasebuffer", releasebufferproc),
))
PyMemberDef = cpython_struct("PyMemberDef", (
("name", rffi.CCHARP),
("type", rffi.INT_real),
("offset", Py_ssize_t),
("flags", rffi.INT_real),
("doc", rffi.CCHARP),
))
# These fields are supported and used in different ways
# The following comments mean:
# #E essential, initialized for all PTOs
# #S supported
# #U unsupported
# #N not yet implemented
PyTypeObjectFields = []
PyTypeObjectFields.extend(PyVarObjectFields)
PyTypeObjectFields.extend([
("tp_name", rffi.CCHARP), #E For printing, in format "<module>.<name>"
("tp_basicsize", Py_ssize_t), #E For allocation
("tp_itemsize", Py_ssize_t), #E "
# Methods to implement standard operations
("tp_dealloc", destructor), #E
("tp_print", printfunc), #U
("tp_getattr", getattrfunc), #U
("tp_setattr", setattrfunc), #U
("tp_compare", cmpfunc), #N
("tp_repr", reprfunc), #N
# Method suites for standard classes
("tp_as_number", Ptr(PyNumberMethods)), #N
("tp_as_sequence", Ptr(PySequenceMethods)), #N
("tp_as_mapping", Ptr(PyMappingMethods)), #N
# More standard operations (here for binary compatibility)
("tp_hash", hashfunc), #N
("tp_call", ternaryfunc), #N
("tp_str", reprfunc), #N
("tp_getattro", getattrofunc),#N
("tp_setattro", setattrofunc),#N
# Functions to access object as input/output buffer
("tp_as_buffer", Ptr(PyBufferProcs)), #U
# Flags to define presence of optional/expanded features
("tp_flags", lltype.Signed), #E
("tp_doc", rffi.CCHARP), #N Documentation string
# Assigned meaning in release 2.0
# call function for all accessible objects
("tp_traverse", traverseproc),#U
# delete references to contained objects
("tp_clear", inquiry), #U
# Assigned meaning in release 2.1
# rich comparisons
("tp_richcompare", richcmpfunc), #N
# weak reference enabler
("tp_weaklistoffset", Py_ssize_t), #U
# Added in release 2.2
# Iterators
("tp_iter", getiterfunc), #N
("tp_iternext", iternextfunc), #N
# Attribute descriptor and subclassing stuff
("tp_methods", Ptr(PyMethodDef)), #S
("tp_members", Ptr(PyMemberDef)), #S
("tp_getset", Ptr(PyGetSetDef)), #S
("tp_base", Ptr(PyTypeObject)), #E
("tp_dict", PyObject), #U
("tp_descr_get", descrgetfunc), #N
("tp_descr_set", descrsetfunc), #N
("tp_dictoffset", Py_ssize_t), #U
("tp_init", initproc), #N
("tp_alloc", allocfunc), #N
("tp_new", newfunc), #S
("tp_free", freefunc), #E Low-level free-memory routine
("tp_is_gc", inquiry), #U For PyObject_IS_GC
("tp_bases", PyObject),#E
("tp_mro", PyObject), #U method resolution order
("tp_cache", PyObject),#S
("tp_subclasses", PyObject), #U
("tp_weaklist", PyObject), #U
("tp_del", destructor), #N
])
cpython_struct("PyTypeObject", PyTypeObjectFields, PyTypeObject)
| [
"pypy.module.cpyext.api.cpython_struct",
"pypy.rpython.lltypesystem.lltype.Ptr",
"pypy.rpython.lltypesystem.lltype.Array"
] | [((2857, 2997), 'pypy.module.cpyext.api.cpython_struct', 'cpython_struct', (['"""PyGetSetDef"""', "(('name', rffi.CCHARP), ('get', getter), ('set', setter), ('doc', rffi.\n CCHARP), ('closure', rffi.VOIDP))"], {}), "('PyGetSetDef', (('name', rffi.CCHARP), ('get', getter), (\n 'set', setter), ('doc', rffi.CCHARP), ('closure', rffi.VOIDP)))\n", (2871, 2997), False, 'from pypy.module.cpyext.api import cpython_struct, Py_ssize_t, Py_ssize_tP, PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE\n'), ((3035, 4288), 'pypy.module.cpyext.api.cpython_struct', 'cpython_struct', (['"""PyNumberMethods"""', "(('nb_add', binaryfunc), ('nb_subtract', binaryfunc), ('nb_multiply',\n binaryfunc), ('nb_divide', binaryfunc), ('nb_remainder', binaryfunc), (\n 'nb_divmod', binaryfunc), ('nb_power', ternaryfunc), ('nb_negative',\n unaryfunc), ('nb_positive', unaryfunc), ('nb_absolute', unaryfunc), (\n 'nb_nonzero', inquiry), ('nb_invert', unaryfunc), ('nb_lshift',\n binaryfunc), ('nb_rshift', binaryfunc), ('nb_and', binaryfunc), (\n 'nb_xor', binaryfunc), ('nb_or', binaryfunc), ('nb_coerce', coercion),\n ('nb_int', unaryfunc), ('nb_long', unaryfunc), ('nb_float', unaryfunc),\n ('nb_oct', unaryfunc), ('nb_hex', unaryfunc), ('nb_inplace_add',\n binaryfunc), ('nb_inplace_subtract', binaryfunc), (\n 'nb_inplace_multiply', binaryfunc), ('nb_inplace_divide', binaryfunc),\n ('nb_inplace_remainder', binaryfunc), ('nb_inplace_power', ternaryfunc),\n ('nb_inplace_lshift', binaryfunc), ('nb_inplace_rshift', binaryfunc), (\n 'nb_inplace_and', binaryfunc), ('nb_inplace_xor', binaryfunc), (\n 'nb_inplace_or', binaryfunc), ('nb_floor_divide', binaryfunc), (\n 'nb_true_divide', binaryfunc), ('nb_inplace_floor_divide', binaryfunc),\n ('nb_inplace_true_divide', binaryfunc), ('nb_index', unaryfunc))"], {}), "('PyNumberMethods', (('nb_add', binaryfunc), ('nb_subtract',\n binaryfunc), ('nb_multiply', binaryfunc), ('nb_divide', binaryfunc), 
(\n 'nb_remainder', binaryfunc), ('nb_divmod', binaryfunc), ('nb_power',\n ternaryfunc), ('nb_negative', unaryfunc), ('nb_positive', unaryfunc), (\n 'nb_absolute', unaryfunc), ('nb_nonzero', inquiry), ('nb_invert',\n unaryfunc), ('nb_lshift', binaryfunc), ('nb_rshift', binaryfunc), (\n 'nb_and', binaryfunc), ('nb_xor', binaryfunc), ('nb_or', binaryfunc), (\n 'nb_coerce', coercion), ('nb_int', unaryfunc), ('nb_long', unaryfunc),\n ('nb_float', unaryfunc), ('nb_oct', unaryfunc), ('nb_hex', unaryfunc),\n ('nb_inplace_add', binaryfunc), ('nb_inplace_subtract', binaryfunc), (\n 'nb_inplace_multiply', binaryfunc), ('nb_inplace_divide', binaryfunc),\n ('nb_inplace_remainder', binaryfunc), ('nb_inplace_power', ternaryfunc),\n ('nb_inplace_lshift', binaryfunc), ('nb_inplace_rshift', binaryfunc), (\n 'nb_inplace_and', binaryfunc), ('nb_inplace_xor', binaryfunc), (\n 'nb_inplace_or', binaryfunc), ('nb_floor_divide', binaryfunc), (\n 'nb_true_divide', binaryfunc), ('nb_inplace_floor_divide', binaryfunc),\n ('nb_inplace_true_divide', binaryfunc), ('nb_index', unaryfunc)))\n", (3049, 4288), False, 'from pypy.module.cpyext.api import cpython_struct, Py_ssize_t, Py_ssize_tP, PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE\n'), ((4399, 4770), 'pypy.module.cpyext.api.cpython_struct', 'cpython_struct', (['"""PySequenceMethods"""', "(('sq_length', lenfunc), ('sq_concat', binaryfunc), ('sq_repeat',\n ssizeargfunc), ('sq_item', ssizeargfunc), ('sq_slice',\n ssizessizeargfunc), ('sq_ass_item', ssizeobjargproc), ('sq_ass_slice',\n ssizessizeobjargproc), ('sq_contains', objobjproc), (\n 'sq_inplace_concat', binaryfunc), ('sq_inplace_repeat', ssizeargfunc))"], {}), "('PySequenceMethods', (('sq_length', lenfunc), ('sq_concat',\n binaryfunc), ('sq_repeat', ssizeargfunc), ('sq_item', ssizeargfunc), (\n 'sq_slice', ssizessizeargfunc), ('sq_ass_item', ssizeobjargproc), (\n 'sq_ass_slice', ssizessizeobjargproc), ('sq_contains', 
objobjproc), (\n 'sq_inplace_concat', binaryfunc), ('sq_inplace_repeat', ssizeargfunc)))\n", (4413, 4770), False, 'from pypy.module.cpyext.api import cpython_struct, Py_ssize_t, Py_ssize_tP, PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE\n'), ((4815, 4946), 'pypy.module.cpyext.api.cpython_struct', 'cpython_struct', (['"""PyMappingMethods"""', "(('mp_length', lenfunc), ('mp_subscript', binaryfunc), ('mp_ass_subscript',\n objobjargproc))"], {}), "('PyMappingMethods', (('mp_length', lenfunc), ('mp_subscript',\n binaryfunc), ('mp_ass_subscript', objobjargproc)))\n", (4829, 4946), False, 'from pypy.module.cpyext.api import cpython_struct, Py_ssize_t, Py_ssize_tP, PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE\n'), ((4975, 5245), 'pypy.module.cpyext.api.cpython_struct', 'cpython_struct', (['"""PyBufferProcs"""', "(('bf_getreadbuffer', readbufferproc), ('bf_getwritebuffer',\n writebufferproc), ('bf_getsegcount', segcountproc), ('bf_getcharbuffer',\n charbufferproc), ('bf_getbuffer', getbufferproc), ('bf_releasebuffer',\n releasebufferproc))"], {}), "('PyBufferProcs', (('bf_getreadbuffer', readbufferproc), (\n 'bf_getwritebuffer', writebufferproc), ('bf_getsegcount', segcountproc),\n ('bf_getcharbuffer', charbufferproc), ('bf_getbuffer', getbufferproc),\n ('bf_releasebuffer', releasebufferproc)))\n", (4989, 5245), False, 'from pypy.module.cpyext.api import cpython_struct, Py_ssize_t, Py_ssize_tP, PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE\n'), ((5275, 5435), 'pypy.module.cpyext.api.cpython_struct', 'cpython_struct', (['"""PyMemberDef"""', "(('name', rffi.CCHARP), ('type', rffi.INT_real), ('offset', Py_ssize_t), (\n 'flags', rffi.INT_real), ('doc', rffi.CCHARP))"], {}), "('PyMemberDef', (('name', rffi.CCHARP), ('type', rffi.\n INT_real), ('offset', Py_ssize_t), 
('flags', rffi.INT_real), ('doc',\n rffi.CCHARP)))\n", (5289, 5435), False, 'from pypy.module.cpyext.api import cpython_struct, Py_ssize_t, Py_ssize_tP, PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE\n'), ((8241, 8305), 'pypy.module.cpyext.api.cpython_struct', 'cpython_struct', (['"""PyTypeObject"""', 'PyTypeObjectFields', 'PyTypeObject'], {}), "('PyTypeObject', PyTypeObjectFields, PyTypeObject)\n", (8255, 8305), False, 'from pypy.module.cpyext.api import cpython_struct, Py_ssize_t, Py_ssize_tP, PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE\n'), ((493, 536), 'pypy.rpython.lltypesystem.lltype.Array', 'lltype.Array', (['PyO'], {'hints': "{'nolength': True}"}), "(PyO, hints={'nolength': True})\n", (505, 536), False, 'from pypy.rpython.lltypesystem import rffi, lltype\n'), ((6264, 6284), 'pypy.rpython.lltypesystem.lltype.Ptr', 'Ptr', (['PyNumberMethods'], {}), '(PyNumberMethods)\n', (6267, 6284), False, 'from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void\n'), ((6313, 6335), 'pypy.rpython.lltypesystem.lltype.Ptr', 'Ptr', (['PySequenceMethods'], {}), '(PySequenceMethods)\n', (6316, 6335), False, 'from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void\n'), ((6363, 6384), 'pypy.rpython.lltypesystem.lltype.Ptr', 'Ptr', (['PyMappingMethods'], {}), '(PyMappingMethods)\n', (6366, 6384), False, 'from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void\n'), ((6717, 6735), 'pypy.rpython.lltypesystem.lltype.Ptr', 'Ptr', (['PyBufferProcs'], {}), '(PyBufferProcs)\n', (6720, 6735), False, 'from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void\n'), ((7471, 7487), 'pypy.rpython.lltypesystem.lltype.Ptr', 'Ptr', (['PyMethodDef'], {}), '(PyMethodDef)\n', (7474, 7487), False, 'from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void\n'), ((7512, 7528), 'pypy.rpython.lltypesystem.lltype.Ptr', 'Ptr', 
(['PyMemberDef'], {}), '(PyMemberDef)\n', (7515, 7528), False, 'from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void\n'), ((7552, 7568), 'pypy.rpython.lltypesystem.lltype.Ptr', 'Ptr', (['PyGetSetDef'], {}), '(PyGetSetDef)\n', (7555, 7568), False, 'from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void\n'), ((7591, 7608), 'pypy.rpython.lltypesystem.lltype.Ptr', 'Ptr', (['PyTypeObject'], {}), '(PyTypeObject)\n', (7594, 7608), False, 'from pypy.rpython.lltypesystem.lltype import Ptr, FuncType, Void\n')] |
import os
import unittest
from aruco import ArucoDetector
class TestArucoDetectionBlue(unittest.TestCase):
    """Verify that the blue aruco marker (id 13) is detected in every orientation."""
    ar = ArucoDetector()
    BLUE = 13
    def _assert_detects_blue(self, relpath):
        # Shared helper: read the image and expect the BLUE marker id.
        self.assertEqual(self.BLUE, self.ar.read_image(os.path.abspath(relpath)))
    def test_blue(self):
        self._assert_detects_blue("./datasets/blue.jpg")
    def test_blue_tilted_r(self):
        self._assert_detects_blue("./datasets/blue-tilted-r.jpg")
    def test_blue_reverse(self):
        self._assert_detects_blue("./datasets/blue-reverse.jpg")
    def test_blue_tilted_l(self):
        self._assert_detects_blue("./datasets/blue-tilted-l.jpg")
if __name__ == '__main__':
    # NOTE(review): unittest treats any verbosity >= 2 the same (per-test
    # output), so verbosity=3 behaves like 2.
    unittest.main(verbosity=3)
| [
"unittest.main",
"aruco.ArucoDetector",
"os.path.abspath"
] | [((120, 135), 'aruco.ArucoDetector', 'ArucoDetector', ([], {}), '()\n', (133, 135), False, 'from aruco import ArucoDetector\n'), ((820, 846), 'unittest.main', 'unittest.main', ([], {'verbosity': '(3)'}), '(verbosity=3)\n', (833, 846), False, 'import unittest\n'), ((213, 251), 'os.path.abspath', 'os.path.abspath', (['"""./datasets/blue.jpg"""'], {}), "('./datasets/blue.jpg')\n", (228, 251), False, 'import os\n'), ((368, 415), 'os.path.abspath', 'os.path.abspath', (['"""./datasets/blue-tilted-r.jpg"""'], {}), "('./datasets/blue-tilted-r.jpg')\n", (383, 415), False, 'import os\n'), ((531, 577), 'os.path.abspath', 'os.path.abspath', (['"""./datasets/blue-reverse.jpg"""'], {}), "('./datasets/blue-reverse.jpg')\n", (546, 577), False, 'import os\n'), ((694, 741), 'os.path.abspath', 'os.path.abspath', (['"""./datasets/blue-tilted-l.jpg"""'], {}), "('./datasets/blue-tilted-l.jpg')\n", (709, 741), False, 'import os\n')] |
import sys
import time
from datetime import datetime
from traceback import print_exc
import speedtest
from decorator import restartable
KILOBYTE = 1024
MEGABYTE = 1024 * KILOBYTE
REPORT_FREQ = 60  # seconds between reports; overridable from the command line
def test_setup(st):
    """Run one complete speed test and return (download, upload, ping) strings.

    ``st`` is a ``speedtest.Speedtest``-like object.  Download/upload are
    measured in bits/s and reported in Mbps with two decimals; ping in ms.
    """
    st.get_servers()
    st.get_best_server()
    st.download()  # result lands in st.results (bits/s)
    st.upload()    # idem
    stats = st.results.dict()
    fmt = "{:.2f}".format
    return (fmt(stats["download"] / MEGABYTE),
            fmt(stats["upload"] / MEGABYTE),
            fmt(stats["ping"]))
@restartable
def main():
    """Run a speed test every REPORT_FREQ seconds and print one report line.

    An optional first CLI argument (>= 30) overrides the report interval.
    Any exception is printed with its traceback and the function returns;
    ``@restartable`` presumably re-invokes it — confirm in the decorator.
    """
    global REPORT_FREQ
    # Optional CLI override for the reporting frequency (minimum 30 s).
    if len(sys.argv) > 1 and int(sys.argv[1]) >= 30:
        REPORT_FREQ = int(sys.argv[1])
    try:
        tester = speedtest.Speedtest()
        while True:
            stamp = datetime.now().strftime("%H:%M:%S")
            download, upload, ping = test_setup(tester)
            print(f"[{stamp}]: PING: {ping} ms\tDOWN: {download} Mbps\tUP: {upload} Mbps")
            time.sleep(REPORT_FREQ)
    except Exception as exc:
        print("\nCaught exception: ", exc.__class__.__name__)
        print_exc()
# Start the periodic reporter only when executed as a script.
if __name__ == "__main__":
    main()
| [
"datetime.datetime.now",
"traceback.print_exc",
"speedtest.Speedtest",
"time.sleep"
] | [((783, 804), 'speedtest.Speedtest', 'speedtest.Speedtest', ([], {}), '()\n', (802, 804), False, 'import speedtest\n'), ((1042, 1065), 'time.sleep', 'time.sleep', (['REPORT_FREQ'], {}), '(REPORT_FREQ)\n', (1052, 1065), False, 'import time\n'), ((1165, 1176), 'traceback.print_exc', 'print_exc', ([], {}), '()\n', (1174, 1176), False, 'from traceback import print_exc\n'), ((848, 862), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (860, 862), False, 'from datetime import datetime\n')] |
import os
from importlib import import_module
from django.apps import apps
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.serializer import serializer_factory
from django.db.models import ForeignKey, ManyToManyField
from django.utils.inspect import get_func_args
from django.utils.module_loading import module_dir
class SettingsReference(str):
    """
    Special subclass of string which actually references a current settings
    value. It's treated as the value in memory, but serializes out to a
    settings.NAME attribute reference.
    """
    def __new__(cls, value, setting_name):
        # ``__new__`` receives the class, not an instance; ``cls`` is the
        # accurate conventional name (the original misleadingly used ``self``).
        return str.__new__(cls, value)
    def __init__(self, value, setting_name):
        # Name of the settings attribute this value stands for (e.g. "MEDIA_ROOT").
        self.setting_name = setting_name
def fullname(o):
    """Return the dotted path of *o*'s class: ``"module.ClassName"``.

    Goes through ``o.__class__`` for both parts.  The original read
    ``o.__module__``, which works for instances of Python-defined classes
    (the lookup falls back to the class ``__dict__``) but raises
    AttributeError for instances of C-implemented types, where
    ``__module__`` only exists as a metaclass descriptor.
    """
    cls = o.__class__
    return cls.__module__ + "." + cls.__name__
class OperationWriter:
    """Serialize one migration operation into a plain, JSON-friendly dict.

    Unlike Django's own OperationWriter (which emits Python source), this
    variant produces data: basic types pass through unchanged, model fields
    are expanded into a ``props`` mapping, relations are summarized, and
    anything else is stringified.
    """
    def __init__(self, operation, indentation=2):
        self.operation = operation
        self.buff = []  # retained for interface parity; unused by serialize()
        self.indentation = indentation
        self.data = []  # retained for interface parity; unused by serialize()
    def serialize(self, app):
        """Deconstruct ``self.operation`` and return a dict of its arguments.

        ``app`` is the app label; when the result has a ``name`` entry it is
        qualified as ``"app.Name"``.
        """
        d = {}
        def _is_basic(value):
            # JSON-representable values that can be emitted unchanged.
            return isinstance(value, (str, list, dict, bool, float, int))
        def _expand_field(field):
            # Expand a model-field instance into a serializable props dict.
            props = {}
            i = field.__dict__
            props["type_name"] = fullname(field)
            props["choices"] = i.get("choices", None)
            props["blank"] = i.get("blank", True)
            props["is_null"] = i.get("null", True)
            props["primary_key"] = i.get("primary_key", False)
            props["help_text"] = i.get("help_text", '')
            props["max_length"] = i.get("max_length", None)
            props["verbose_name"] = i.get("verbose_name", None)
            if "default" in i:
                default = i["default"]
                # Stringify defaults of non-basic types (callables, dates, ...).
                if type(default) in [set, list, dict, int, float, bool, type(None)]:
                    props["default"] = default
                else:
                    props["default"] = str(default)
            else:
                props["default"] = None
            return props
        def _relation_info(field):
            # Shared serialization for ForeignKey / ManyToManyField.
            return {
                "many_to_many": bool(field.many_to_many),
                "many_to_one": bool(field.many_to_one),
                "one_to_many": bool(field.one_to_many),
                "one_to_one": bool(field.one_to_one),
                "field_str": str(field),
                "to": str(field.remote_field.model).replace("__fake__.", "").replace("<class", "").replace("'", "").replace(">", "").replace(" ", ""),
            }
        def _write(_arg_name, _arg_value):
            if _arg_name in self.operation.serialization_expand_args and isinstance(_arg_value, (list, tuple, dict)):
                if isinstance(_arg_value, dict):
                    ds = {}
                    for a, b in _arg_value.items():
                        # BUG FIX: the original tested ``or b is not None``,
                        # which stringified None and kept arbitrary objects
                        # raw — inconsistent with the two sibling branches
                        # below ("basic or None passes through, else str()").
                        if _is_basic(b) or b is None:
                            ds[a] = b
                        else:
                            ds[a] = str(b)
                    d[_arg_name] = ds
                else:
                    f = []
                    for item in _arg_value:
                        if isinstance(item, tuple):
                            if len(item) == 2:
                                # (field_name, field_instance) pair.
                                f.append({'name': str(item[0]), 'props': _expand_field(item[1])})
                            else:
                                f.append(list(item))
                        elif _is_basic(item) or item is None:
                            f.append(item)
                        else:
                            f.append(str(item))
                    d[_arg_name] = f
            elif isinstance(_arg_value, (ForeignKey, ManyToManyField)):
                d[_arg_name] = _relation_info(_arg_value)
                d["related"] = True
            elif _is_basic(_arg_value) or _arg_value is None:
                d[_arg_name] = _arg_value
            else:
                d[_arg_name] = str(_arg_value)
        name, args, kwargs = self.operation.deconstruct()
        operation_args = get_func_args(self.operation.__init__)
        # Positional args pair up with the operation's __init__ parameters...
        for i, arg in enumerate(args):
            _write(operation_args[i], arg)
        # ...the remaining parameters are written only when given as kwargs.
        for arg_name in operation_args[len(args):]:
            if arg_name in kwargs:
                _write(arg_name, kwargs[arg_name])
        if "name" in d:
            # Qualify the model name with the app label.
            d["name"] = app + "." + d["name"]
        return d
class MigrationWriter:
"""
Take a Migration instance and is able to produce the contents
of the migration file from it.
"""
def __init__(self, migration):
self.migration = migration
def as_list(self, app):
operations = []
for operation in self.migration.operations:
operations.append(OperationWriter(operation).serialize(app))
return operations
@property
def basedir(self):
migrations_package_name, _ = MigrationLoader.migrations_module(self.migration.app_label)
if migrations_package_name is None:
raise ValueError("Django can't create migrations for app '%s' because " "migrations have been disabled via the MIGRATION_MODULES " "setting." % self.migration.app_label)
# See if we can import the migrations module directly
try:
migrations_module = import_module(migrations_package_name)
except ImportError:
pass
else:
try:
return module_dir(migrations_module)
except ValueError:
pass
# Alright, see if it's a direct submodule of the app
app_config = apps.get_app_config(self.migration.app_label)
maybe_app_name, _, migrations_package_basename = migrations_package_name.rpartition(".")
if app_config.name == maybe_app_name:
return os.path.join(app_config.path, migrations_package_basename)
# In case of using MIGRATION_MODULES setting and the custom package
# doesn't exist, create one, starting from an existing package
existing_dirs, missing_dirs = migrations_package_name.split("."), []
while existing_dirs:
missing_dirs.insert(0, existing_dirs.pop(-1))
try:
base_module = import_module(".".join(existing_dirs))
except ImportError:
continue
else:
try:
base_dir = module_dir(base_module)
except ValueError:
continue
else:
break
else:
raise ValueError(
"Could not locate an appropriate location to create " "migrations package %s. Make sure the toplevel " "package exists and can be imported." % migrations_package_name
)
final_dir = os.path.join(base_dir, *missing_dirs)
if not os.path.isdir(final_dir):
os.makedirs(final_dir)
for missing_dir in missing_dirs:
base_dir = os.path.join(base_dir, missing_dir)
with open(os.path.join(base_dir, "__init__.py"), "w"):
pass
return final_dir
@property
def filename(self):
return "%s.py" % self.migration.name
@property
def path(self):
return os.path.join(self.basedir, self.filename)
@classmethod
def serialize(cls, value):
return serializer_factory(value).serialize()
| [
"importlib.import_module",
"django.utils.inspect.get_func_args",
"os.makedirs",
"os.path.join",
"django.db.migrations.serializer.serializer_factory",
"django.apps.apps.get_app_config",
"django.db.migrations.loader.MigrationLoader.migrations_module",
"os.path.isdir",
"django.utils.module_loading.modu... | [((5448, 5486), 'django.utils.inspect.get_func_args', 'get_func_args', (['self.operation.__init__'], {}), '(self.operation.__init__)\n', (5461, 5486), False, 'from django.utils.inspect import get_func_args\n'), ((6407, 6466), 'django.db.migrations.loader.MigrationLoader.migrations_module', 'MigrationLoader.migrations_module', (['self.migration.app_label'], {}), '(self.migration.app_label)\n', (6440, 6466), False, 'from django.db.migrations.loader import MigrationLoader\n'), ((7105, 7150), 'django.apps.apps.get_app_config', 'apps.get_app_config', (['self.migration.app_label'], {}), '(self.migration.app_label)\n', (7124, 7150), False, 'from django.apps import apps\n'), ((8295, 8332), 'os.path.join', 'os.path.join', (['base_dir', '*missing_dirs'], {}), '(base_dir, *missing_dirs)\n', (8307, 8332), False, 'import os\n'), ((8757, 8798), 'os.path.join', 'os.path.join', (['self.basedir', 'self.filename'], {}), '(self.basedir, self.filename)\n', (8769, 8798), False, 'import os\n'), ((6802, 6840), 'importlib.import_module', 'import_module', (['migrations_package_name'], {}), '(migrations_package_name)\n', (6815, 6840), False, 'from importlib import import_module\n'), ((7313, 7371), 'os.path.join', 'os.path.join', (['app_config.path', 'migrations_package_basename'], {}), '(app_config.path, migrations_package_basename)\n', (7325, 7371), False, 'import os\n'), ((8348, 8372), 'os.path.isdir', 'os.path.isdir', (['final_dir'], {}), '(final_dir)\n', (8361, 8372), False, 'import os\n'), ((8386, 8408), 'os.makedirs', 'os.makedirs', (['final_dir'], {}), '(final_dir)\n', (8397, 8408), False, 'import os\n'), ((8473, 8508), 'os.path.join', 'os.path.join', (['base_dir', 'missing_dir'], {}), '(base_dir, missing_dir)\n', (8485, 8508), False, 'import os\n'), ((6940, 6969), 'django.utils.module_loading.module_dir', 'module_dir', (['migrations_module'], {}), '(migrations_module)\n', (6950, 6969), False, 'from django.utils.module_loading import 
module_dir\n'), ((8863, 8888), 'django.db.migrations.serializer.serializer_factory', 'serializer_factory', (['value'], {}), '(value)\n', (8881, 8888), False, 'from django.db.migrations.serializer import serializer_factory\n'), ((7897, 7920), 'django.utils.module_loading.module_dir', 'module_dir', (['base_module'], {}), '(base_module)\n', (7907, 7920), False, 'from django.utils.module_loading import module_dir\n'), ((8531, 8568), 'os.path.join', 'os.path.join', (['base_dir', '"""__init__.py"""'], {}), "(base_dir, '__init__.py')\n", (8543, 8568), False, 'import os\n')] |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import os
from bigdl.util.common import *
class TestEngineEnv():
def setup_method(self, method):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
"""
pass
def teardown_method(self, method):
""" teardown any state that was previously setup with a setup_method
call.
"""
pass
def test___prepare_bigdl_env(self):
# BigDL will automatically execute 'prepare_env()' function which
# includes '__prepare_bigdl_env()'. To test if there's no more duplicate
# adding jar path message, just do prepare_env()' again
# to see if the log is correct and the environment variables should not vary.
from bigdl.util.engine import prepare_env
bigdl_jars_env_1 = os.environ.get("BIGDL_JARS", None)
spark_class_path_1 = os.environ.get("SPARK_CLASSPATH", None)
sys_path_1 = sys.path
prepare_env()
# there should be no duplicate messages about adding jar path to
# the environment var "BIGDL_JARS"
# environment variables should remain the same
bigdl_jars_env_2 = os.environ.get("BIGDL_JARS", None)
spark_class_path_2 = os.environ.get("SPARK_CLASSPATH", None)
sys_path_2 = sys.path
assert bigdl_jars_env_1 == bigdl_jars_env_2
assert spark_class_path_1 == spark_class_path_2
assert sys_path_1 == sys_path_2
if __name__ == '__main__':
pytest.main()
| [
"bigdl.util.engine.prepare_env",
"os.environ.get",
"pytest.main"
] | [((2128, 2141), 'pytest.main', 'pytest.main', ([], {}), '()\n', (2139, 2141), False, 'import pytest\n'), ((1460, 1494), 'os.environ.get', 'os.environ.get', (['"""BIGDL_JARS"""', 'None'], {}), "('BIGDL_JARS', None)\n", (1474, 1494), False, 'import os\n'), ((1524, 1563), 'os.environ.get', 'os.environ.get', (['"""SPARK_CLASSPATH"""', 'None'], {}), "('SPARK_CLASSPATH', None)\n", (1538, 1563), False, 'import os\n'), ((1602, 1615), 'bigdl.util.engine.prepare_env', 'prepare_env', ([], {}), '()\n', (1613, 1615), False, 'from bigdl.util.engine import prepare_env\n'), ((1814, 1848), 'os.environ.get', 'os.environ.get', (['"""BIGDL_JARS"""', 'None'], {}), "('BIGDL_JARS', None)\n", (1828, 1848), False, 'import os\n'), ((1878, 1917), 'os.environ.get', 'os.environ.get', (['"""SPARK_CLASSPATH"""', 'None'], {}), "('SPARK_CLASSPATH', None)\n", (1892, 1917), False, 'import os\n')] |
# MIT License
# Copyright (c) 2019 JetsonHacks
# See license
# Using a CSI camera (such as the Raspberry Pi Version 2) connected to a
# NVIDIA Jetson Nano Developer Kit using OpenCV
# Drivers for the camera and OpenCV are included in the base image
from __future__ import print_function
import os
import argparse
import numpy as np
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from train import Net
# gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera
# Defaults to 1280x720 @ 60fps
# Flip the image by setting the flip_method (most common values: 0 and 2)
# display_width and display_height determine the size of the window on the screen
def gstreamer_pipeline (capture_width=1280, capture_height=720, display_width=640, display_height=360, framerate=20, flip_method=0) :
return ('nvarguscamerasrc ! '
'video/x-raw(memory:NVMM), '
'width=(int)%d, height=(int)%d, '
'format=(string)NV12, framerate=(fraction)%d/1 ! '
'nvvidconv flip-method=%d ! '
'video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! '
'videoconvert ! '
'video/x-raw, format=(string)BGR ! appsink' % (capture_width,capture_height,framerate,flip_method,display_width,display_height))
def transform_inputs(image, device):
img = cv2.resize(image, (28, 28), interpolation=cv2.INTER_AREA)
# img = cv2.resize(image, (28, 28))
# image = cv2.threshold(image0, 50, 255, cv2.THRESH_BINARY)[1]
blur = cv2.GaussianBlur(img,(5,5),0)
image = cv2.threshold(blur,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1]
# image = image[np.newaxis, ...]
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
inputs = transform(image)
inputs = inputs.unsqueeze(0)
return inputs.to(device)
def show_camera(args):
# To flip the image, modify the flip_method parameter (0 and 2 are the most common)
print(gstreamer_pipeline(flip_method=0))
labels = []
with open("data/labels.txt", "r") as f:
for line in f:
labels.append(line.strip())
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
model = Net(n_classes=len(labels))
model.load_state_dict(torch.load(args.model_path))
model = model.to(device)
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
org = (30, 50)
color = (0, 0, 255)
thickness = 2
cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
if cap.isOpened():
window_handle = cv2.namedWindow('Camera', cv2.WINDOW_AUTOSIZE)
# Window
while cv2.getWindowProperty('Camera',0) >= 0:
ret_val, img = cap.read()
# Convert to grayscale and apply Gaussian filtering
im_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
im_gray = cv2.GaussianBlur(im_gray, (5, 5), 0)
# Threshold the image
ret, im_th = cv2.threshold(im_gray, 90, 255, cv2.THRESH_BINARY_INV)
# Find contours in the image
im2, ctrs, hier = cv2.findContours(im_th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Get rectangles contains each contour
rects = [cv2.boundingRect(ctr) for ctr in ctrs]
# For each rectangular region, calculate HOG features and predict
# the digit using Linear SVM.
for rect in rects:
# Draw the rectangles
cv2.rectangle(img, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 0), 3)
# Make the rectangular region around the digit
leng = int(rect[3] * 1.6)
pt1 = int(rect[1] + rect[3] // 2 - leng // 2)
pt2 = int(rect[0] + rect[2] // 2 - leng // 2)
roi = im_gray[pt1:pt1+leng, pt2:pt2+leng]
# Resize the image
h, w = roi.shape
if h > 10 and w > 10:
# Transform inputs
inputs = transform_inputs(roi, device)
# Run Model Evaluation
output = model(inputs)
result = output.data.cpu().numpy().argmax()
cv2.putText(img, labels[result], (rect[0], rect[1]),cv2.FONT_HERSHEY_DUPLEX, 2, (0, 255, 255), 3)
cv2.imshow("Camera", img)
# This also acts as
keyCode = cv2.waitKey(30) & 0xff
# Stop the program on the ESC key
if keyCode == 27:
break
cap.release()
cv2.destroyAllWindows()
else:
print('Unable to open camera')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--model_path', default="models/model.pt")
args = parser.parse_args()
show_camera(args)
| [
"cv2.rectangle",
"cv2.imshow",
"torch.cuda.is_available",
"cv2.destroyAllWindows",
"argparse.ArgumentParser",
"cv2.threshold",
"torchvision.transforms.ToTensor",
"cv2.waitKey",
"cv2.putText",
"torchvision.transforms.Normalize",
"cv2.cvtColor",
"cv2.getWindowProperty",
"cv2.resize",
"cv2.Ga... | [((1392, 1449), 'cv2.resize', 'cv2.resize', (['image', '(28, 28)'], {'interpolation': 'cv2.INTER_AREA'}), '(image, (28, 28), interpolation=cv2.INTER_AREA)\n', (1402, 1449), False, 'import cv2\n'), ((1568, 1600), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(5, 5)', '(0)'], {}), '(img, (5, 5), 0)\n', (1584, 1600), False, 'import cv2\n'), ((2280, 2305), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2303, 2305), False, 'import torch\n'), ((2319, 2362), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (2331, 2362), False, 'import torch\n'), ((4866, 4926), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch MNIST Example"""'}), "(description='PyTorch MNIST Example')\n", (4889, 4926), False, 'import argparse\n'), ((1610, 1678), 'cv2.threshold', 'cv2.threshold', (['blur', '(0)', '(255)', '(cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)'], {}), '(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n', (1623, 1678), False, 'import cv2\n'), ((2428, 2455), 'torch.load', 'torch.load', (['args.model_path'], {}), '(args.model_path)\n', (2438, 2455), False, 'import torch\n'), ((2731, 2777), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Camera"""', 'cv2.WINDOW_AUTOSIZE'], {}), "('Camera', cv2.WINDOW_AUTOSIZE)\n", (2746, 2777), False, 'import cv2\n'), ((4751, 4774), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4772, 4774), False, 'import cv2\n'), ((1779, 1800), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1798, 1800), False, 'from torchvision import datasets, transforms\n'), ((1829, 1871), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (1849, 1871), False, 'from torchvision import datasets, transforms\n'), ((2811, 2845), 'cv2.getWindowProperty', 'cv2.getWindowProperty', (['"""Camera"""', '(0)'], {}), "('Camera', 
0)\n", (2832, 2845), False, 'import cv2\n'), ((2976, 3013), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2988, 3013), False, 'import cv2\n'), ((3036, 3072), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['im_gray', '(5, 5)', '(0)'], {}), '(im_gray, (5, 5), 0)\n', (3052, 3072), False, 'import cv2\n'), ((3133, 3187), 'cv2.threshold', 'cv2.threshold', (['im_gray', '(90)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(im_gray, 90, 255, cv2.THRESH_BINARY_INV)\n', (3146, 3187), False, 'import cv2\n'), ((3260, 3327), 'cv2.findContours', 'cv2.findContours', (['im_th', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(im_th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (3276, 3327), False, 'import cv2\n'), ((4526, 4551), 'cv2.imshow', 'cv2.imshow', (['"""Camera"""', 'img'], {}), "('Camera', img)\n", (4536, 4551), False, 'import cv2\n'), ((3401, 3422), 'cv2.boundingRect', 'cv2.boundingRect', (['ctr'], {}), '(ctr)\n', (3417, 3422), False, 'import cv2\n'), ((3646, 3745), 'cv2.rectangle', 'cv2.rectangle', (['img', '(rect[0], rect[1])', '(rect[0] + rect[2], rect[1] + rect[3])', '(0, 255, 0)', '(3)'], {}), '(img, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3\n ]), (0, 255, 0), 3)\n', (3659, 3745), False, 'import cv2\n'), ((4601, 4616), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (4612, 4616), False, 'import cv2\n'), ((4403, 4506), 'cv2.putText', 'cv2.putText', (['img', 'labels[result]', '(rect[0], rect[1])', 'cv2.FONT_HERSHEY_DUPLEX', '(2)', '(0, 255, 255)', '(3)'], {}), '(img, labels[result], (rect[0], rect[1]), cv2.\n FONT_HERSHEY_DUPLEX, 2, (0, 255, 255), 3)\n', (4414, 4506), False, 'import cv2\n')] |
from aoc.day06_1 import run
if __name__ == "__main__":
print(run(256))
| [
"aoc.day06_1.run"
] | [((66, 74), 'aoc.day06_1.run', 'run', (['(256)'], {}), '(256)\n', (69, 74), False, 'from aoc.day06_1 import run\n')] |
"""
Abinit workflows
"""
from __future__ import division, print_function
import sys
import os
import os.path
import shutil
import abc
import collections
import functools
import numpy as np
from pprint import pprint
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.design_patterns import Enum, AttrDict
from pymatgen.core.physical_constants import Bohr2Ang, Ang2Bohr, Ha2eV, Ha_eV, Ha2meV
from pymatgen.serializers.json_coders import MSONable, json_pretty_dump
from pymatgen.io.smartio import read_structure
from pymatgen.util.num_utils import iterator_from_slice, chunks
from pymatgen.io.abinitio.task import task_factory, Task
from .utils import abinit_output_iscomplete, File
from .netcdf import GSR_Reader
from .abiobjects import Smearing, AbiStructure, KSampling, Electrons
from .pseudos import Pseudo, PseudoDatabase, PseudoTable, get_abinit_psp_dir
from .strategies import ScfStrategy
from .task import RunMode
#import logging
#logger = logging.getLogger(__name__)
__author__ = "<NAME>"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "gmatteo at gmail.com"
__status__ = "Development"
__date__ = "$Feb 21, 2013M$"
#__all__ = [
#]
################################################################################
def map_method(method):
    """Decorator returning a function that invokes ``method`` on every item of an iterable."""
    @functools.wraps(method)
    def wrapped(iter_obj, *args, **kwargs):
        meth_name = method.__name__
        return [getattr(obj, meth_name)(*args, **kwargs) for obj in iter_obj]
    return wrapped
################################################################################
class Product(object):
    """
    A product is an output file produced by an AbinitTask instance that is
    needed by another task in order to start its own calculation.
    """
    # TODO: it would be nice to pass absolute paths to abinit with getden_path
    # so that we could avoid creating symbolic links before running, but the
    # presence of the C-bindings complicates the implementation
    # (gfortran SIGFAULTs if I add strings to dataset_type!)
    #
    # Map: file extension -> abinit variables needed to read the file back.
    _ext2abivars = {
        "_DEN": {"irdden": 1},
        "_WFK": {"irdwfk": 1},
        "_SCR": {"irdscr": 1},
        "_QPS": {"irdqps": 1},
    }

    def __init__(self, ext, path):
        self.ext = ext
        self.file = File(path)

    def __str__(self):
        return "ext = %s, file = %s" % (self.ext, self.file)

    def get_filepath(self):
        """Absolute path of the file associated to the product."""
        return self.file.path

    def get_abivars(self):
        """New dictionary with the abinit variables needed to read this file back."""
        return dict(self._ext2abivars[self.ext])
class WorkLink(object):
    """
    This object describes the dependencies among the tasks contained in a Work instance.

    A WorkLink is a task that produces a list of products (files) that are
    reused by the other tasks belonging to a Work instance.
    One usually instantiates the object by calling work.register_task and produces_exts.
    Example:

        # Register the SCF task in work and get the link.
        scf_link = work.register_task(scf_strategy)

        # Register the NSCF calculation and its dependency on the SCF run.
        nscf_link = work.register_task(nscf_strategy, links=scf_link.produces_exts("_DEN"))
    """
    def __init__(self, task, exts=None):
        """
        Args:
            task:
                The task associated to the link.
            exts:
                Extensions of the output files that are needed for running the other tasks.
        """
        self._task = task
        self._products = []

        if exts is None:
            return

        if isinstance(exts, str):
            exts = [exts]

        for ext in exts:
            self._products.append(Product(ext, task.odata_path_from_ext(ext)))

    def __str__(self):
        prod_lines = "\n".join(str(prod) for prod in self.products)
        return "%s: task %s with products\n %s" % (
            self.__class__.__name__, repr(self._task), prod_lines)

    @property
    def products(self):
        """List of Product objects produced by the linked task."""
        return self._products

    def produces_exts(self, exts):
        """Return a new WorkLink on the same task, restricted to the given extensions."""
        return WorkLink(self._task, exts=exts)

    def get_abivars(self):
        """
        Returns a dictionary with the abinit variables that must
        be added to the input file in order to connect the two tasks.
        """
        abivars = {}
        for prod in self._products:
            abivars.update(prod.get_abivars())
        return abivars

    def get_filepaths_and_exts(self):
        """Returns the paths of the output files produced by self and their extensions."""
        paths = [prod.get_filepath() for prod in self._products]
        exts = [prod.ext for prod in self._products]
        return paths, exts

    @property
    def status(self):
        """The status of the link, equivalent to the task status."""
        return self._task.status
################################################################################
class WorkflowError(Exception):
    """Base class for the exceptions raised by Workflow objects."""
class BaseWorkflow(object):
    """Abstract interface shared by workflow objects; iterating over self yields tasks."""
    __metaclass__ = abc.ABCMeta

    Error = WorkflowError

    # Interface modeled after subprocess.Popen.
    @abc.abstractproperty
    def processes(self):
        """Return a list of objects that support the subprocess.Popen protocol."""

    def poll(self):
        """
        Check if all child processes have terminated. Set and return
        returncode attribute.
        """
        return [task.poll() for task in self]

    def wait(self):
        """
        Wait for child processed to terminate. Set and return returncode
        attributes.
        """
        return [task.wait() for task in self]

    def communicate(self, input=None):
        """
        Interact with processes: Send data to stdin. Read data from stdout and
        stderr, until end-of-file is reached.

        Wait for process to terminate. The optional input argument should be a
        string to be sent to the child processed, or None, if no data should be
        sent to the children.

        communicate() returns a list of tuples (stdoutdata, stderrdata).
        """
        return [task.communicate(input) for task in self]

    @property
    def returncodes(self):
        """
        The children return codes, set by poll() and wait() (and indirectly by communicate()).
        A None value indicates that the process hasn't terminated yet.
        A negative value -N indicates that the child was terminated by signal N (Unix only).
        """
        return [task.returncode for task in self]

    @property
    def ncpus_reserved(self):
        """Returns the number of CPUs reserved in this moment."""
        return sum(task.tot_ncpus for task in self
                   if task.status in [task.S_SUB, task.S_RUN])

    def fetch_task_to_run(self):
        """
        Returns the first task that is ready to run or None if no task can be submitted at present.

        Raises StopIteration if all tasks are done.
        """
        for task in self:
            # A task is ready when its status is S_READY and all its links (if any) are done.
            links_done = all(stat == task.S_DONE for stat in task.links_status)
            if task.status == task.S_READY and links_done:
                return task

        # All the tasks are done so raise an exception that will be handled by the client code.
        if all(task.status == task.S_DONE for task in self):
            raise StopIteration

        # No task found; this usually happens when we have dependencies.
        # Beware of possible deadlocks here!
        return None

    @abc.abstractmethod
    def setup(self, *args, **kwargs):
        """Method called before submitting the calculations."""

    def _setup(self, *args, **kwargs):
        self.setup(*args, **kwargs)

    def get_results(self, *args, **kwargs):
        """
        Method called once the calculations completes.

        The base version returns a dictionary task_name : TaskResults for each task in self.
        """
        return WorkFlowResults(task_results={task.name: task.results for task in self})
##########################################################################################
class WorkFlowResults(dict, MSONable):
    """
    Dictionary used to store some of the results produced by a workflow.

    Mandatory key:
        task_results: dict mapping the task name to its TaskResults.

    Exceptions raised during the run are accumulated as strings in the list
    stored under the private key ``_exceptions``.
    """
    _mandatory_keys = [
        "task_results",
    ]
    EXC_KEY = "_exceptions"

    def __init__(self, *args, **kwargs):
        super(WorkFlowResults, self).__init__(*args, **kwargs)

        if self.EXC_KEY not in self:
            self[self.EXC_KEY] = []

    @property
    def exceptions(self):
        """List of strings with the exceptions raised during the run."""
        return self[self.EXC_KEY]

    def push_exceptions(self, *exceptions):
        """Save the string representation of exceptions, avoiding duplicated entries."""
        for exc in exceptions:
            newstr = str(exc)
            if newstr not in self.exceptions:
                self[self.EXC_KEY] += [newstr,]

    def assert_valid(self):
        """
        Returns empty string if results seem valid.

        The try assert except trick allows one to get a string with info on the exception.
        We use the += operator so that sub-classes can add their own message.
        """
        # Validate tasks.
        # NOTE: dict subclasses do not expose their keys as attributes,
        # hence we must use self["task_results"] (the old attribute access
        # raised AttributeError) and iterate over the TaskResults values.
        for tres in self["task_results"].values():
            self[self.EXC_KEY] += tres.assert_valid()

        return self[self.EXC_KEY]

    @property
    def to_dict(self):
        """JSON-serializable dict representation."""
        d = {k: v for k, v in self.items()}
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        return d

    @classmethod
    def from_dict(cls, d):
        """Reconstruct the object from its dict representation."""
        mydict = {k: v for k, v in d.items() if k not in ["@module", "@class",]}
        return cls(mydict)

    def json_dump(self, filename):
        """Save the results in json format in file filename."""
        json_pretty_dump(self.to_dict, filename)

    @classmethod
    def json_load(cls, filename):
        """Reconstruct the object from the json file filename."""
        # Use the stdlib json module: the previous implementation called an
        # undefined json_load function, raising NameError at runtime.
        import json
        with open(filename, "r") as fh:
            return cls.from_dict(json.load(fh))
##########################################################################################
class Workflow(BaseWorkflow, MSONable):
    """
    A Workflow is a list of (possibly connected) tasks.

    Tasks are added with register_task; dependencies among them are
    expressed via WorkLink objects.
    """
    Error = WorkflowError

    def __init__(self, workdir, runmode, **kwargs):
        """
        Args:
            workdir:
                Path to the working directory.
            runmode:
                RunMode instance or string "sequential"
        """
        self.workdir = os.path.abspath(workdir)

        self.runmode = RunMode.asrunmode(runmode)

        self._kwargs = kwargs

        # List of task objects, ordered by task_id.
        self._tasks = []

        # Dict with the dependencies of each task, indexed by task.id
        self._links_dict = collections.defaultdict(list)

    def __len__(self):
        return len(self._tasks)

    def __iter__(self):
        return self._tasks.__iter__()

    def chunks(self, chunk_size):
        """Yield successive chunks of tasks of length chunk_size."""
        for tasks in chunks(self, chunk_size):
            yield tasks

    def __getitem__(self, slice):
        return self._tasks[slice]

    def __repr__(self):
        return "<%s at %s, workdir = %s>" % (self.__class__.__name__, id(self), str(self.workdir))

    @property
    def to_dict(self):
        """JSON-serializable dict representation of the workflow."""
        d = {"workdir": self.workdir,
             "runmode": self.runmode.to_dict,
             "kwargs" : self._kwargs,
            }
        d["@module"] = self.__class__.__module__
        d["@class"] = self.__class__.__name__
        return d

    @staticmethod
    def from_dict(d):
        """Reconstruct the workflow from its dict representation."""
        # Fixed: the previous implementation returned Work(...), a name that
        # is not defined in this module (NameError at runtime).
        return Workflow(d["workdir"], d["runmode"], **d["kwargs"])

    @property
    def alldone(self):
        """True if all the tasks in the workflow are done."""
        return all([task.status == Task.S_DONE for task in self])

    @property
    def isnc(self):
        """True if norm-conserving calculation"""
        return all(task.isnc for task in self)

    @property
    def ispaw(self):
        """True if PAW calculation"""
        return all(task.ispaw for task in self)

    def path_in_workdir(self, filename):
        """Create the absolute path of filename in the working directory."""
        return os.path.join(self.workdir, filename)

    def setup(self, *args, **kwargs):
        """
        Method called before running the calculations.
        The default implementation is empty.
        """

    def register_task(self, strategy, links=()):
        """
        Registers a new task:

            - creates a new AbinitTask from the input strategy.
            - adds the new task to the internal list, taking into account possible dependencies.

        Returns: WorkLink object
        """
        task_id = len(self) + 1
        task_workdir = os.path.join(self.workdir, "task_" + str(task_id))

        # Handle possible dependencies.
        if links:
            if not isinstance(links, collections.Iterable):
                links = [links,]

        # Create the new task (note the factory so that we create subclasses easily).
        task = task_factory(strategy, task_workdir, self.runmode, task_id=task_id, links=links)

        self._tasks.append(task)

        if links:
            self._links_dict[task_id].extend(links)
            print("task_id %s neeeds\n %s" % (task_id, [str(l) for l in links]))

        return WorkLink(task)

    def build(self, *args, **kwargs):
        """Creates the top level directory."""
        if not os.path.exists(self.workdir):
            os.makedirs(self.workdir)

    def get_status(self, only_highest_rank=False):
        """
        Get the status of the tasks in self.

        If only_highest_rank is True, return only the most advanced status,
        otherwise a list with the status of each task.
        """
        status_list = [task.status for task in self]

        if only_highest_rank:
            return max(status_list)
        else:
            return status_list

    @property
    def processes(self):
        """List of the processes associated to the tasks."""
        return [task.process for task in self]

    def rmtree(self, *args, **kwargs):
        """
        Remove all calculation files and directories.

        Keyword arguments:
            verbose: (0)
                Print message if verbose is not zero.
        """
        if kwargs.pop('verbose', 0):
            print('Removing directory tree: %s' % self.workdir)

        shutil.rmtree(self.workdir)

    def move(self, dst, isabspath=False):
        """
        Recursively move self.workdir to another location. This is similar to the Unix "mv" command.
        The destination path must not already exist. If the destination already exists
        but is not a directory, it may be overwritten depending on os.rename() semantics.

        Be default, dst is located in the parent directory of self.workdir, use isabspath=True
        to specify an absolute path.
        """
        if not isabspath:
            dst = os.path.join(os.path.dirname(self.workdir), dst)

        shutil.move(self.workdir, dst)

    def submit_tasks(self, *args, **kwargs):
        """
        Submits the tasks in self.
        """
        for task in self:
            task.start(*args, **kwargs)
            # FIXME
            task.wait()

    def start(self, *args, **kwargs):
        """
        Start the work. Calls build and _setup first, then the tasks are submitted.
        Non-blocking call
        """
        # Build dirs and files.
        self.build(*args, **kwargs)

        # Initial setup
        self._setup(*args, **kwargs)

        # Submit tasks (does not block)
        self.submit_tasks(*args, **kwargs)

    def read_etotal(self):
        """
        Reads the total energy from the GSR file produced by each task.

        Returns a list with the total energies in Hartree.

        Raises:
            self.Error if some task is still in running/submitted state.
        """
        if not self.alldone:
            raise self.Error("Some task is still in running/submitted state")

        etotal = []
        for task in self:
            # Open the GSR file and read etotal (Hartree)
            with GSR_Reader(task.odata_path_from_ext("_GSR")) as ncdata:
                etotal.append(ncdata.read_value("etotal"))

        return etotal
################################################################################
class IterativeWork(Workflow):
    """
    A workflow whose tasks are generated iteratively by a strategy generator.
    The iteration stops when exit_iteration signals convergence, the generator
    is exhausted, or max_niter is exceeded.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, workdir, runmode, strategy_generator, max_niter=25):
        """
        Args:
            workdir:
                Working directory.
            strategy_generator:
                Strategy generator.
            max_niter:
                Maximum number of iterations. A negative value or zero value
                is equivalent to having an infinite number of iterations.
        """
        super(IterativeWork, self).__init__(workdir, runmode)

        self.strategy_generator = strategy_generator
        self.max_niter = max_niter

    def next_task(self):
        """
        Generate and register a new task.

        Return: task object

        Raises:
            StopIteration when the strategy generator is exhausted.
        """
        # next() propagates StopIteration when the generator is exhausted.
        next_strategy = next(self.strategy_generator)

        self.register_task(next_strategy)
        assert len(self) == self.niter

        return self[-1]

    def submit_tasks(self, *args, **kwargs):
        """
        Run the tasks till self.exit_iteration says to exit or the number of
        iterations exceeds self.max_niter.
        """
        self.niter = 1

        while True:
            # A non-positive max_niter means an unbounded number of iterations.
            if 0 < self.max_niter < self.niter:
                print("niter %d > max_niter %d" % (self.niter, self.max_niter))
                break

            try:
                task = self.next_task()
            except StopIteration:
                break

            # Start the task and block till completion.
            task.start(*args, **kwargs)
            task.wait()

            data = self.exit_iteration(*args, **kwargs)
            if data["exit"]:
                break

            self.niter += 1

    @abc.abstractmethod
    def exit_iteration(self, *args, **kwargs):
        """
        Return a dictionary with the results produced at the given iteration.
        The dictionary must contains an entry "converged" that evaluates to
        True if the iteration should be stopped.
        """
##########################################################################################
def strictly_increasing(values):
    """True if each element of values is strictly greater than its predecessor."""
    for prev, nxt in zip(values, values[1:]):
        if prev >= nxt:
            return False
    return True
def strictly_decreasing(values):
    """True if each element of values is strictly smaller than its predecessor."""
    for prev, nxt in zip(values, values[1:]):
        if prev <= nxt:
            return False
    return True
def non_increasing(values):
    """True if no element of *values* is greater than its predecessor."""
    for prev, curr in zip(values, values[1:]):
        if prev < curr:
            return False
    return True
def non_decreasing(values):
    """True if no element of *values* is smaller than its predecessor."""
    for prev, curr in zip(values, values[1:]):
        if prev > curr:
            return False
    return True
def monotonic(values, mode="<", atol=1.e-8):
    """
    Returns False if values are not monotonic (decreasing|increasing).
    mode is "<" for a decreasing sequence, ">" for an increasing sequence.
    Two numbers are considered equal if they differ less that atol.
    .. warning:
        Not very efficient for large data sets.
    >>> values = [1.2, 1.3, 1.4]
    >>> monotonic(values, mode="<")
    False
    >>> monotonic(values, mode=">")
    True
    """
    if len(values) == 1:
        return True
    pairs = zip(values, values[1:])
    if mode == ">":
        # A pair is acceptable when it increases, or when the two values
        # coincide within atol.
        return all(vp > v or abs(vp - v) <= atol for v, vp in pairs)
    elif mode == "<":
        return all(vp < v or abs(vp - v) <= atol for v, vp in pairs)
    else:
        raise ValueError("Wrong mode %s" % mode)
def check_conv(values, tol, min_numpts=1, mode="abs", vinf=None):
    """
    Given a list of values and a tolerance tol, returns the leftmost index for which

        abs(value[i] - vinf) < tol if mode == "abs"
    or
        abs(value[i] - vinf) / vinf < tol if mode == "rel"

    returns -1 if convergence is not achieved. By default, vinf = values[-1]

    Args:
        tol:
            Tolerance
        min_numpts:
            Minimum number of points that must be converged.
        mode:
            "abs" for absolute convergence, "rel" for relative convergence.
        vinf:
            Used to specify an alternative value instead of values[-1].
    """
    reference = values[-1] if vinf is None else vinf
    if mode == "abs":
        deviations = [abs(v - reference) for v in values]
    elif mode == "rel":
        deviations = [abs(v - reference) / reference for v in values]
    else:
        raise ValueError("Wrong mode %s" % mode)
    numpts = len(deviations)
    conv_start = -2  # sentinel: -2 + 1 == -1 signals "not converged"
    if numpts > min_numpts and deviations[-2] < tol:
        # Scan from the right for the last point that is NOT converged;
        # convergence starts just after it (index 1 if every point passes).
        conv_start = 0
        for j in range(numpts - 1, -1, -1):
            if deviations[j] > tol:
                conv_start = j
                break
        # Demand at least min_numpts converged points at the tail.
        if (numpts - conv_start - 1) < min_numpts:
            conv_start = -2
    return conv_start + 1
def compute_hints(ecut_list, etotal, atols_mev, pseudo, min_numpts=1, stream=sys.stdout):
    """
    Analyze the convergence of etotal(ecut) and compute the ecut hints.

    Args:
        ecut_list: Cutoff energies in Hartree, one per computed point.
        etotal: Total energies in Hartree, parallel to ecut_list.
        atols_mev: Absolute tolerances in meV for (low, normal, high) accuracy.
        pseudo: Pseudopotential object; only .name and .path are used here.
        min_numpts: Minimum number of converged points required.
        stream: If not None, a convergence table is written to it.

    Returns:
        dict with the ecut/aug_ratio hints for the three accuracy levels;
        the entry "exit" evaluates to True if the high level is converged.
    """
    de_low, de_normal, de_high = [a / (1000 * Ha_eV) for a in atols_mev]
    etotal_inf = etotal[-1]
    # check_conv returns the leftmost converged index, or -1.
    ihigh = check_conv(etotal, de_high, min_numpts=min_numpts)
    inormal = check_conv(etotal, de_normal)
    ilow = check_conv(etotal, de_low)
    accidx = {"H": ihigh, "N": inormal, "L": ilow}
    table = []
    app = table.append
    app(["iter", "ecut", "etotal", "et-e_inf [meV]", "accuracy",])
    for idx, (ec, et) in enumerate(zip(ecut_list, etotal)):
        line = "%d %.1f %.7f %.3f" % (idx, ec, et, (et-etotal_inf)* Ha_eV * 1.e+3)
        # Tag the row with the accuracy levels that converge exactly here.
        row = line.split() + ["".join(c for c,v in accidx.items() if v == idx)]
        app(row)
    if stream is not None:
        from pymatgen.util.string_utils import pprint_table
        stream.write("pseudo: %s\n" % pseudo.name)
        pprint_table(table, out=stream)
    ecut_high, ecut_normal, ecut_low = 3 * (None,)
    # Renamed from `exit`, which shadowed the builtin.
    converged = (ihigh != -1)
    if converged:
        ecut_low = ecut_list[ilow]
        ecut_normal = ecut_list[inormal]
        ecut_high = ecut_list[ihigh]
    # Norm-conserving: a single augmentation ratio of 1.
    aug_ratios = [1,]
    aug_ratio_low, aug_ratio_normal, aug_ratio_high = 3 * (1,)
    data = {
        "exit"       : converged,
        "etotal"     : list(etotal),
        "ecut_list"  : ecut_list,
        "aug_ratios" : aug_ratios,
        "low"        : {"ecut": ecut_low, "aug_ratio": aug_ratio_low},
        "normal"     : {"ecut": ecut_normal, "aug_ratio": aug_ratio_normal},
        "high"       : {"ecut": ecut_high, "aug_ratio": aug_ratio_high},
        "pseudo_name": pseudo.name,
        "pseudo_path": pseudo.path,
        "atols_mev"  : atols_mev,
        "dojo_level" : 0,
    }
    return data
##########################################################################################
def plot_etotal(ecut_list, etotals, aug_ratios, show=True, savefig=None, *args, **kwargs):
    """
    Uses Matplotlib to plot the energy curve as function of ecut
    Args:
        ecut_list:
            List of cutoff energies
        etotals:
            Total energies in Hartree, see aug_ratios
        aug_ratios:
            List augmentation rations. [1,] for norm-conserving, [4, ...] for PAW
            The number of elements in aug_ration must equal the number of (sub)lists
            in etotals. Example:
                - NC: etotals = [3.4, 4,5 ...], aug_ratios = [1,]
                - PAW: etotals = [[3.4, ...], [3.6, ...]], aug_ratios = [4,6]
        show:
            True to show the figure
        savefig:
            'abc.png' or 'abc.eps'* to save the figure to a file.
    """
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    npts = len(ecut_list)
    # One energy sublist per augmentation ratio; NC runs pass a flat list.
    if len(aug_ratios) != 1 and len(aug_ratios) != len(etotals):
        raise ValueError("The number of sublists in etotal must equal the number of aug_ratios")
    if len(aug_ratios) == 1:
        etotals = [etotals,]
    lines, legends = [], []
    emax = -np.inf
    for (aratio, etot) in zip(aug_ratios, etotals):
        # Plot the deviation (in meV) from the last, i.e. largest-ecut, value.
        # NOTE(review): `emev - emev_inf` implies Ha2meV returns an
        # array-like (numpy) object — confirm.
        emev = Ha2meV(etot)
        emev_inf = npts * [emev[-1]]
        yy = emev - emev_inf
        # emax is currently unused except by the commented-out
        # set_view_interval call below.
        emax = max(emax, np.max(yy))
        line, = ax.plot(ecut_list, yy, "-->", linewidth=3.0, markersize=10)
        lines.append(line)
        legends.append("aug_ratio = %s" % aratio)
    ax.legend(lines, legends, 'upper right', shadow=True)
    # Set xticks and labels.
    ax.grid(True)
    ax.set_xlabel("Ecut [Ha]")
    ax.set_ylabel("$\Delta$ Etotal [meV]")
    ax.set_xticks(ecut_list)
    # Fixed y-window of [-10, 20] meV around convergence.
    #ax.yaxis.set_view_interval(-10, emax + 0.01 * abs(emax))
    ax.yaxis.set_view_interval(-10, 20)
    ax.set_title("$\Delta$ Etotal Vs Ecut")
    if show:
        plt.show()
    if savefig is not None:
        fig.savefig(savefig)
##########################################################################################
class PseudoConvergence(Workflow):
    """Convergence study of the total energy of an isolated atom vs ecut.
    All tasks are registered up front (contrast with
    PseudoIterativeConvergence, which generates them one at a time)."""
    def __init__(self, workdir, pseudo, ecut_list, atols_mev,
                 runmode="sequential", spin_mode="polarized", acell=(8, 9, 10), smearing="fermi_dirac:0.1 eV",):
        super(PseudoConvergence, self).__init__(workdir, runmode)
        # Temporary object used to build the strategy.
        generator = PseudoIterativeConvergence(workdir, pseudo, ecut_list, atols_mev,
                                               spin_mode = spin_mode,
                                               acell    = acell,
                                               smearing = smearing,
                                               max_niter = len(ecut_list),
                                              )
        self.atols_mev = atols_mev
        self.pseudo = Pseudo.aspseudo(pseudo)
        # One SCF task per cutoff energy, registered in order.
        self.ecut_list = []
        for ecut in ecut_list:
            strategy = generator.strategy_with_ecut(ecut)
            self.ecut_list.append(ecut)
            self.register_task(strategy)
    def get_results(self, *args, **kwargs):
        """Collect E(ecut), plot it, compute the ecut hints and dump them
        to results.json (unless json_dump=False is passed)."""
        # Get the results of the tasks.
        wf_results = super(PseudoConvergence, self).get_results()
        # NOTE(review): read_etotal is presumably provided by the base
        # Workflow class — confirm.
        etotal = self.read_etotal()
        data = compute_hints(self.ecut_list, etotal, self.atols_mev, self.pseudo)
        plot_etotal(data["ecut_list"], data["etotal"], data["aug_ratios"],
                    show=False, savefig=self.path_in_workdir("etotal.pdf"))
        wf_results.update(data)
        # E(ecut) should decrease monotonically for a sane pseudo.
        if not monotonic(etotal, mode="<", atol=1.0e-5):
            print("E(ecut) is not decreasing")
            wf_results.push_exceptions("E(ecut) is not decreasing")
        if kwargs.get("json_dump", True):
            wf_results.json_dump(self.path_in_workdir("results.json"))
        return wf_results
class PseudoIterativeConvergence(IterativeWork):
    """Iterative flavour of the ecut-convergence study: tasks are generated
    one at a time and the run stops as soon as E(ecut) is converged."""
    def __init__(self, workdir, pseudo, ecut_list_or_slice, atols_mev,
                 runmode="sequential", spin_mode="polarized", acell=(8, 9, 10), smearing="fermi_dirac:0.1 eV", max_niter=50,):
        """
        Args:
            workdir:
                Working directory.
            pseudo:
                string or Pseudo instance
            ecut_list_or_slice:
                List of cutoff energies or slice object (mainly used for infinite iterations).
            atols_mev:
                List of absolute tolerances in meV (3 entries corresponding to accuracy ["low", "normal", "high"]
            spin_mode:
                Defined how the electronic spin will be treated.
            acell:
                Lengths of the periodic box in Bohr.
            smearing:
                Smearing instance or string in the form "mode:tsmear". Default: FemiDirac with T=0.1 eV
        """
        self.pseudo = Pseudo.aspseudo(pseudo)
        self.atols_mev = atols_mev
        self.spin_mode = spin_mode
        self.smearing = Smearing.assmearing(smearing)
        self.acell = acell
        # Accept either an explicit list of ecuts or a (possibly open-ended)
        # slice describing them.
        if isinstance(ecut_list_or_slice, slice):
            self.ecut_iterator = iterator_from_slice(ecut_list_or_slice)
        else:
            self.ecut_iterator = iter(ecut_list_or_slice)
        # Construct a generator that returns strategy objects.
        def strategy_generator():
            for ecut in self.ecut_iterator:
                yield self.strategy_with_ecut(ecut)
        super(PseudoIterativeConvergence, self).__init__(
            workdir, runmode, strategy_generator(), max_niter=max_niter)
        # NOTE(review): self.isnc appears to be inherited (pseudo is
        # norm-conserving) — confirm where it is defined.
        if not self.isnc:
            raise NotImplementedError("PAW convergence tests are not supported yet")
    def strategy_with_ecut(self, ecut):
        "Return a Strategy instance with given cutoff energy ecut"
        # Define the system: one atom in a box of lenghts acell.
        boxed_atom = AbiStructure.boxed_atom(self.pseudo, acell=self.acell)
        # Gamma-only sampling.
        gamma_only = KSampling.gamma_only()
        # Setup electrons.
        electrons = Electrons(spin_mode=self.spin_mode, smearing=self.smearing)
        # Don't write WFK files.
        extra_abivars = {
            "ecut" : ecut,
            "prtwf": 0,
        }
        strategy = ScfStrategy(boxed_atom, self.pseudo, gamma_only,
                    spin_mode=self.spin_mode, smearing=self.smearing,
                    charge=0.0, scf_algorithm=None,
                    use_symmetries=True, **extra_abivars)
        return strategy
    @property
    def ecut_list(self):
        """The list of cutoff energies computed so far"""
        return [float(task.strategy.ecut) for task in self]
    def check_etotal_convergence(self, *args, **kwargs):
        """Recompute the convergence hints from the energies read so far."""
        return compute_hints(self.ecut_list, self.read_etotal(), self.atols_mev,
                             self.pseudo)
    def exit_iteration(self, *args, **kwargs):
        """IterativeWork hook: stop when data["exit"] is truthy."""
        # NOTE(review): `self` is also passed as an extra positional arg
        # here; harmless since *args is unused by the callee.
        return self.check_etotal_convergence(self, *args, **kwargs)
    def get_results(self, *args, **kwargs):
        """Collect the results, plot E(ecut) and dump results.json."""
        # Get the results of the tasks.
        wf_results = super(PseudoIterativeConvergence, self).get_results()
        data = self.check_etotal_convergence()
        plot_etotal(data["ecut_list"], data["etotal"], data["aug_ratios"],
                    show=False, savefig=self.path_in_workdir("etotal.pdf"))
        wf_results.update(data)
        # E(ecut) should decrease monotonically for a sane pseudo.
        if not monotonic(data["etotal"], mode="<", atol=1.0e-5):
            print("E(ecut) is not decreasing")
            wf_results.push_exceptions("E(ecut) is not decreasing")
        if kwargs.get("json_dump", True):
            wf_results.json_dump(self.path_in_workdir("results.json"))
        return wf_results
################################################################################
class BandStructure(Workflow):
    """Band-structure workflow: a GS-SCF run followed by a NSCF band run
    (and, optionally, a DOS run), both reading the SCF density file."""
    def __init__(self, workdir, runmode, scf_strategy, nscf_strategy,
                 dos_strategy=None):
        super(BandStructure, self).__init__(workdir, runmode)
        # Ground-state SCF run.
        gs_link = self.register_task(scf_strategy)
        # NSCF band run: depends on the density produced by the SCF task.
        self.register_task(nscf_strategy, links=gs_link.produces_exts("_DEN"))
        # Optional DOS computation, also fed by the SCF density.
        if dos_strategy is not None:
            self.register_task(dos_strategy,
                               links=gs_link.produces_exts("_DEN"))
################################################################################
class Relaxation(Workflow):
    """Workflow consisting of a single structural-relaxation task."""
    def __init__(self, workdir, runmode, relax_strategy):
        """
        Args:
            workdir: Working directory.
            runmode: Run mode passed to the base Workflow.
            relax_strategy: Strategy describing the relaxation run.
        """
        super(Relaxation, self).__init__(workdir, runmode)
        # The returned link was previously bound to an unused local
        # variable; register the task directly.
        self.register_task(relax_strategy)
################################################################################
class DeltaTest(Workflow):
    """Delta-factor workflow: SCF runs of the same structure at a series of
    scaled volumes (90%..110% of V0), followed by an EOS fit."""
    def __init__(self, workdir, runmode, structure_or_cif, pseudos, kppa,
                 spin_mode="polarized", smearing="fermi_dirac:0.1 eV",
                 accuracy="normal",
                 ecut=None, ecutsm=0.05, chksymbreak=0): # FIXME Hack
        super(DeltaTest, self).__init__(workdir, runmode)
        if isinstance(structure_or_cif, Structure):
            structure = structure_or_cif
        else:
            # Assume CIF file
            structure = read_structure(structure_or_cif)
        structure = AbiStructure.asabistructure(structure)
        smearing = Smearing.assmearing(smearing)
        self._input_structure = structure
        v0 = structure.volume
        # Volumes from 90% to 110% of V0, in 2% steps.
        self.volumes = v0 * np.arange(90, 112, 2) / 100.
        for vol in self.volumes:
            # NOTE(review): lattice.scale(vol) presumably rescales the cell
            # to the target volume — confirm against pymatgen docs.
            new_lattice = structure.lattice.scale(vol)
            new_structure = Structure(new_lattice, structure.species,
                                      structure.frac_coords)
            new_structure = AbiStructure.asabistructure(new_structure)
            extra_abivars = {
                "ecutsm": ecutsm,
                "prtwf" : 0,
            }
            if ecut is not None:
                extra_abivars.update({"ecut": ecut})
            ksampling = KSampling.automatic_density(new_structure, kppa,
                                                    chksymbreak=chksymbreak)
            # One SCF task per volume.
            scf_strategy = ScfStrategy(new_structure, pseudos, ksampling,
                                       accuracy=accuracy, spin_mode=spin_mode,
                                       smearing=smearing, **extra_abivars)
            self.register_task(scf_strategy)
    def get_results(self, *args, **kwargs):
        """Collect E(V), fit a Murnaghan EOS, dump results.json and the
        deltadata.txt file used by the delta-factor comparison."""
        num_sites = self._input_structure.num_sites
        etotal = Ha2eV(self.read_etotal())
        wf_results = super(DeltaTest, self).get_results()
        wf_results.update({
            "etotal"    : list(etotal),
            "volumes"   : list(self.volumes),
            "natom"     : num_sites,
            "dojo_level": 1,
        })
        from .eos import EOS
        try:
            eos_fit = EOS.Murnaghan().fit(self.volumes, etotal)
            print(eos_fit)
            eos_fit.plot(show=False, savefig=self.path_in_workdir("eos.pdf"))
            # Equilibrium volume, bulk modulus and its pressure derivative.
            wf_results.update({
                "v0": eos_fit.v0,
                "b" : eos_fit.b,
                "bp": eos_fit.bp,
            })
        except EOS.Error as exc:
            # A failed fit is recorded, not fatal.
            wf_results.push_exceptions(exc)
        if kwargs.get("json_dump", True):
            wf_results.json_dump(self.path_in_workdir("results.json"))
        # Write data for the computation of the delta factor
        with open(self.path_in_workdir("deltadata.txt"), "w") as fh:
            fh.write("# Volume/natom [Ang^3] Etotal/natom [eV]\n")
            for (v, e) in zip(self.volumes, etotal):
                fh.write("%s %s\n" % (v/num_sites, e/num_sites))
        return wf_results
################################################################################
class GW_Workflow(Workflow):
    """
    Workflow for GW calculations:
    GS-SCF -> NSCF (wavefunctions) -> screening (SCR) -> self-energy (SIGMA).
    """
    def __init__(self, workdir, runmode, scf_strategy, nscf_strategy,
                 scr_strategy, sigma_strategy):
        """
        Args:
            workdir:
                Working directory of the calculation.
            runmode:
                Run mode.
            scf_strategy:
                SCFStrategy instance
            nscf_strategy:
                NSCFStrategy instance
            scr_strategy:
                Strategy for the screening run.
            sigma_strategy:
                Strategy for the self-energy run.
        """
        super(GW_Workflow, self).__init__(workdir, runmode)
        # Ground-state SCF run.
        gs_link = self.register_task(scf_strategy)
        # NSCF run: produces the wavefunctions, depends on the SCF density.
        wfk_link = self.register_task(nscf_strategy,
                                      links=gs_link.produces_exts("_DEN"))
        # Screening run: depends on the wavefunctions.
        scr_link = self.register_task(scr_strategy,
                                      links=wfk_link.produces_exts("_WFK"))
        # Self-energy run: needs both the wavefunctions and the screening.
        self.register_task(sigma_strategy,
                           links=[wfk_link.produces_exts("_WFK"),
                                  scr_link.produces_exts("_SCR")])
################################################################################
| [
"pymatgen.core.physical_constants.Ha2meV",
"pymatgen.util.num_utils.chunks",
"pymatgen.core.structure.Structure",
"pymatgen.util.num_utils.iterator_from_slice",
"numpy.arange",
"os.path.exists",
"shutil.move",
"pymatgen.serializers.json_coders.json_pretty_dump",
"functools.wraps",
"numpy.max",
"... | [((1455, 1478), 'functools.wraps', 'functools.wraps', (['method'], {}), '(method)\n', (1470, 1478), False, 'import functools\n'), ((24139, 24151), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (24149, 24151), True, 'import matplotlib.pyplot as plt\n'), ((9780, 9820), 'pymatgen.serializers.json_coders.json_pretty_dump', 'json_pretty_dump', (['self.to_dict', 'filename'], {}), '(self.to_dict, filename)\n', (9796, 9820), False, 'from pymatgen.serializers.json_coders import MSONable, json_pretty_dump\n'), ((10671, 10695), 'os.path.abspath', 'os.path.abspath', (['workdir'], {}), '(workdir)\n', (10686, 10695), False, 'import os\n'), ((10902, 10931), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (10925, 10931), False, 'import collections\n'), ((11172, 11196), 'pymatgen.util.num_utils.chunks', 'chunks', (['self', 'chunk_size'], {}), '(self, chunk_size)\n', (11178, 11196), False, 'from pymatgen.util.num_utils import iterator_from_slice, chunks\n'), ((12285, 12321), 'os.path.join', 'os.path.join', (['self.workdir', 'filename'], {}), '(self.workdir, filename)\n', (12297, 12321), False, 'import os\n'), ((13529, 13614), 'pymatgen.io.abinitio.task.task_factory', 'task_factory', (['strategy', 'task_workdir', 'self.runmode'], {'task_id': 'task_id', 'links': 'links'}), '(strategy, task_workdir, self.runmode, task_id=task_id, links=links\n )\n', (13541, 13614), False, 'from pymatgen.io.abinitio.task import task_factory, Task\n'), ((14745, 14772), 'shutil.rmtree', 'shutil.rmtree', (['self.workdir'], {}), '(self.workdir)\n', (14758, 14772), False, 'import shutil\n'), ((15353, 15383), 'shutil.move', 'shutil.move', (['self.workdir', 'dst'], {}), '(self.workdir, dst)\n', (15364, 15383), False, 'import shutil\n'), ((22332, 22363), 'pymatgen.util.string_utils.pprint_table', 'pprint_table', (['table'], {'out': 'stream'}), '(table, out=stream)\n', (22344, 22363), False, 'from pymatgen.util.string_utils import pprint_table\n'), ((24549, 
24561), 'pymatgen.core.physical_constants.Ha2meV', 'Ha2meV', (['etot'], {}), '(etot)\n', (24555, 24561), False, 'from pymatgen.core.physical_constants import Bohr2Ang, Ang2Bohr, Ha2eV, Ha_eV, Ha2meV\n'), ((25201, 25211), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25209, 25211), True, 'import matplotlib.pyplot as plt\n'), ((13923, 13951), 'os.path.exists', 'os.path.exists', (['self.workdir'], {}), '(self.workdir)\n', (13937, 13951), False, 'import os\n'), ((13965, 13990), 'os.makedirs', 'os.makedirs', (['self.workdir'], {}), '(self.workdir)\n', (13976, 13990), False, 'import os\n'), ((24654, 24664), 'numpy.max', 'np.max', (['yy'], {}), '(yy)\n', (24660, 24664), True, 'import numpy as np\n'), ((28390, 28429), 'pymatgen.util.num_utils.iterator_from_slice', 'iterator_from_slice', (['ecut_list_or_slice'], {}), '(ecut_list_or_slice)\n', (28409, 28429), False, 'from pymatgen.util.num_utils import iterator_from_slice, chunks\n'), ((32513, 32545), 'pymatgen.io.smartio.read_structure', 'read_structure', (['structure_or_cif'], {}), '(structure_or_cif)\n', (32527, 32545), False, 'from pymatgen.io.smartio import read_structure\n'), ((32907, 32971), 'pymatgen.core.structure.Structure', 'Structure', (['new_lattice', 'structure.species', 'structure.frac_coords'], {}), '(new_lattice, structure.species, structure.frac_coords)\n', (32916, 32971), False, 'from pymatgen.core.structure import Structure\n'), ((15308, 15337), 'os.path.dirname', 'os.path.dirname', (['self.workdir'], {}), '(self.workdir)\n', (15323, 15337), False, 'import os\n'), ((32759, 32780), 'numpy.arange', 'np.arange', (['(90)', '(112)', '(2)'], {}), '(90, 112, 2)\n', (32768, 32780), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[10]:
import os
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import random as rd
# In[11]:
def readFile(folderPath):
    """Return every line of the file at *folderPath* (newlines kept)."""
    with open(folderPath, 'r') as handle:
        return handle.readlines()
# In[12]:
def fillInfoFromContents(fileContents, info):
    """Parse the header lines of one instance file and append the extracted
    values to the per-column lists in *info*."""
    def _ints(line):
        # All whitespace-separated tokens that are pure digits, as ints.
        return [int(tok) for tok in line.split() if tok.isdigit()]
    for lineno, line in enumerate(fileContents):
        if lineno == 0:
            # Third token of the first line is the instance name.
            info['Instance Name'].append(line.split()[2])
        elif lineno == 1:
            info['Number of Nodes'].append(_ints(line)[0])
        elif lineno == 2:
            info['Required Edges'].append(_ints(line)[0])
        elif lineno == 3:
            # Count of non-required edges, consumed after the loop.
            c = _ints(line)[0]
        elif lineno == 6:
            info['Capacity'].append(_ints(line)[0])
        elif lineno == 9:
            info['Depot Nodes'].append(_ints(line))
            info['Number of Depot Nodes'].append(len(info['Depot Nodes'][-1]))
    info['Number of Edges'].append(c + info['Required Edges'][-1])
# In[13]:
def readAndStoreInstanceInfo(folderPath):
    """
    Read every ``.txt`` instance file in *folderPath*, collect the header
    information of each instance, dump it to ``DeArmon_dataset_info.csv``
    and return the collected data.

    Returns:
        dict mapping column names to per-instance value lists.
    """
    info = {'Instance Name' : [],
            'Number of Nodes' : [],
            'Number of Edges' : [],
            'Required Edges' : [],
            'Capacity' : [],
            'Number of Depot Nodes' : [],
            'Depot Nodes' : []}
    # The enumerate index was unused in the original; iterate directly.
    for file in os.listdir(folderPath):
        if file.endswith(".txt"):
            file_path = f"{folderPath}/{file}"
            fileContents = readFile(file_path)
            fillInfoFromContents(fileContents, info)
    df = pd.DataFrame(data=info,columns=['Instance Name','Number of Nodes', 'Number of Edges',
                                     'Required Edges', 'Capacity', 'Number of Depot Nodes', 'Depot Nodes'])
    print(df.columns)
    df.to_csv("DeArmon_dataset_info.csv")
    # The original called df.sort_values(...) and discarded the result
    # (sort_values returns a new frame); the dead call has been removed.
    return info
# In[14]:
def createGraphfromFile(file, info, index):
    """
    Parse the edge list of an instance file.

    Reads the lines between the "LIST_REQ_EDGES :" marker and the
    "LIST_NOREQ_EDGES :" marker and extracts, for every edge, its two
    endpoints and its weight.

    Args:
        file: Path of the instance file.
        info, index: Kept for interface compatibility (unused after the
            removal of the dead `requiredEdges` computation below).

    Returns:
        (startNode, endNode, edgeWeight) parallel lists, one entry per edge.
    """
    fileContents = readFile(file)
    s = ["LIST_REQ_EDGES :\n", "LIST_NOREQ_EDGES :\n"]
    startProcessing = False
    startNode = []
    endNode = []
    edgeWeight = []
    i = 0
    for contents in fileContents:
        # Stop as soon as the closing marker is reached.
        if contents == s[i] and startProcessing:
            break
        if startProcessing:
            # Each edge line holds comma/space separated integers:
            # start node, end node, weight.  Parse the line once instead of
            # running three identical comprehensions as the original did.
            fields = [int(tok) for word in contents.split()
                      for tok in word.split(",") if tok.isdigit()]
            startNode.append(fields[0])
            endNode.append(fields[1])
            edgeWeight.append(fields[2])
        if contents == s[i]:
            startProcessing = True
            i += 1
    # The original also built a `requiredEdges` list here, but it was never
    # returned or used; that dead code has been removed.
    return startNode, endNode, edgeWeight
# In[15]:
def plotGraph(depotNodes, requiredEdges, numNodes, s, t, weights, show=True):
    """
    Build and draw the instance graph.

    Args:
        depotNodes: Node ids that act as depots (drawn in green).
        requiredEdges: Edges highlighted in red.
        numNodes: Total number of nodes (labelled 1..numNodes).
        s, t, weights: Parallel lists describing the edges.
        show: If True, display the figure.

    Returns:
        (G, pos, node_color, depot_node_color, edges)
    """
    G = nx.Graph()
    edges = [(s[i], t[i], weights[i]) for i in range(len(s))]
    for node in range(1, numNodes + 1):
        G.add_node(node)
    pos = nx.spring_layout(G)
    node_color = ['y'] * int(G.number_of_nodes())
    # BUG FIX: the original aliased depot_node_color to node_color (no
    # copy), so mutating the depot colours also recoloured the base layer
    # and the two draw passes were identical.  Copy so the base layer stays
    # all-yellow and the overlay marks the depots green.
    depot_node_color = node_color.copy()
    for node in range(1, len(depot_node_color) + 1):
        if node in depotNodes:
            depot_node_color[node - 1] = 'g'
    G.add_weighted_edges_from(edges)
    labels = nx.get_edge_attributes(G, 'weight')
    nx.draw_networkx(G, pos, node_color=node_color)
    nx.draw_networkx(G, pos, node_color=depot_node_color)
    nx.draw_networkx_edges(G, pos, edgelist=requiredEdges, width=3, alpha=0.5,
                           edge_color="r")
    nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)
    if show:
        plt.figure(1)
        plt.show()
    return G, pos, node_color, depot_node_color, edges
# In[16]:
def creatingIcyRoadInstance(file, info, index, startNode, endNode, edgeWeight):
    """
    Derive a random "icy road" instance from a base instance: pick random
    depot nodes and a random subset of required edges, then plot the graph.

    Args:
        file: Path of the instance file (unused here, kept for compatibility).
        info: Instance-info dict.
        index: Index of this instance inside *info*.
        startNode, endNode, edgeWeight: Parallel edge lists.

    Returns:
        (G, pos, node_color, depot_node_color, edges, newDepotNodes,
         newRequiredEdges, vehicleCapacity, numNodes)
    """
    num_nodes = info['Number of Nodes'][index]
    num_edges = info['Number of Edges'][index]
    # Distinct random depots: the original rejection loop drew until it had
    # num_nodes//5 + 1 distinct nodes; random.sample does this directly.
    newDepotNodes = rd.sample(range(1, num_nodes + 1), num_nodes // 5 + 1)
    # Distinct random required-edge indexes.  BUG FIX: the original drew
    # with randint(0, num_edges), i.e. INCLUSIVE of num_edges, an index
    # that can never match an edge; sample from range(num_edges) instead.
    requiredEdgeIndexes = set(rd.sample(range(num_edges), num_edges // 3 + 1))
    newRequiredEdges = [[startNode[i], endNode[i]]
                        for i in range(num_edges) if i in requiredEdgeIndexes]
    G, pos, node_color, depot_node_color, edges = plotGraph(
        newDepotNodes, newRequiredEdges, num_nodes, startNode, endNode, edgeWeight)
    return (G, pos, node_color, depot_node_color, edges, newDepotNodes,
            newRequiredEdges, 2 * max(edgeWeight), G.number_of_nodes())
# In[25]:
# def createGraph(inputType = 'txt'):
# # folderPath = '../CARP_datasets/DeArmon_gdb-IF'
# # for i, file in enumerate(os.listdir(folderPath)):
# # if file.endswith(".txt"):
# # file_path = f"{folderPath}/{file}"
# file_path = '../CARP_datasets/DeArmon_gdb-IF/gdb-IF-01.txt'
# info = readAndStoreInstanceInfo('../../../CARP_datasets/DeArmon_gdb-IF')
# startNode, endNode, edgeWeight = createGraphfromFile(file_path, info, 0)
# G, depotNodes, requiredNodes, vehicleCapacity, numNodes = creatingIcyRoadInstance(file_path, info, 0, startNode, endNode, edgeWeight)
# return G, depotNodes, requiredNodes, vehicleCapacity, numNodes
# In[ ]:
| [
"os.listdir",
"networkx.draw_networkx_edge_labels",
"networkx.get_edge_attributes",
"networkx.spring_layout",
"networkx.Graph",
"networkx.draw_networkx",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"networkx.draw_networkx_edges",
"random.randint",
"matplotlib.pyplot.show"
] | [((1771, 1937), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'info', 'columns': "['Instance Name', 'Number of Nodes', 'Number of Edges', 'Required Edges',\n 'Capacity', 'Number of Depot Nodes', 'Depot Nodes']"}), "(data=info, columns=['Instance Name', 'Number of Nodes',\n 'Number of Edges', 'Required Edges', 'Capacity',\n 'Number of Depot Nodes', 'Depot Nodes'])\n", (1783, 1937), True, 'import pandas as pd\n'), ((3262, 3272), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (3270, 3272), True, 'import networkx as nx\n'), ((3439, 3458), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {}), '(G)\n', (3455, 3458), True, 'import networkx as nx\n'), ((3714, 3749), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['G', '"""weight"""'], {}), "(G, 'weight')\n", (3736, 3749), True, 'import networkx as nx\n'), ((3753, 3800), 'networkx.draw_networkx', 'nx.draw_networkx', (['G', 'pos'], {'node_color': 'node_color'}), '(G, pos, node_color=node_color)\n', (3769, 3800), True, 'import networkx as nx\n'), ((3806, 3859), 'networkx.draw_networkx', 'nx.draw_networkx', (['G', 'pos'], {'node_color': 'depot_node_color'}), '(G, pos, node_color=depot_node_color)\n', (3822, 3859), True, 'import networkx as nx\n'), ((3865, 3959), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['G', 'pos'], {'edgelist': 'requiredEdges', 'width': '(3)', 'alpha': '(0.5)', 'edge_color': '"""r"""'}), "(G, pos, edgelist=requiredEdges, width=3, alpha=0.5,\n edge_color='r')\n", (3887, 3959), True, 'import networkx as nx\n'), ((4000, 4056), 'networkx.draw_networkx_edge_labels', 'nx.draw_networkx_edge_labels', (['G', 'pos'], {'edge_labels': 'labels'}), '(G, pos, edge_labels=labels)\n', (4028, 4056), True, 'import networkx as nx\n'), ((1555, 1577), 'os.listdir', 'os.listdir', (['folderPath'], {}), '(folderPath)\n', (1565, 1577), False, 'import os\n'), ((4078, 4091), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (4088, 4091), True, 'import matplotlib.pyplot as 
plt\n'), ((4100, 4110), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4108, 4110), True, 'import matplotlib.pyplot as plt\n'), ((4422, 4467), 'random.randint', 'rd.randint', (['(1)', "info['Number of Nodes'][index]"], {}), "(1, info['Number of Nodes'][index])\n", (4432, 4467), True, 'import random as rd\n'), ((4653, 4698), 'random.randint', 'rd.randint', (['(0)', "info['Number of Edges'][index]"], {}), "(0, info['Number of Edges'][index])\n", (4663, 4698), True, 'import random as rd\n')] |
import pytest
from edera import Heap
def test_heap_is_initially_empty():
    """A freshly constructed heap holds no items and is therefore falsy."""
    fresh = Heap()
    assert not fresh
def test_pushing_items_increases_heap_size():
    """len() must grow by one with every push."""
    heap = Heap()
    for expected_size in range(1, 6):
        heap.push(str(expected_size), 0)
        assert len(heap) == expected_size
def test_top_of_heap_always_has_highest_priority():
    """`top` always reflects the item pushed with the highest priority."""
    heap = Heap()
    # Decreasing priorities: the very first item stays on top.
    for value in range(1, 6):
        heap.push(str(value), -value)
        assert heap.top == "1"
    # Increasing priorities: the newest item takes over the top.
    for value in range(1, 6):
        heap.push(str(value), value)
        assert heap.top == str(value)
def test_heap_pops_items_in_correct_order():
    """pop() returns items from highest to lowest priority until empty."""
    heap = Heap()
    for value in range(1, 6):
        heap.push(str(value), value)
    assert heap.pop() == "5"
    for value in range(5, 10):
        heap.push(str(value), value)
    for expected in range(9, 0, -1):
        assert heap.pop() == str(expected)
    assert not heap
def test_accessing_empty_heap_gives_assertion_error():
    """Reading `top` of an empty heap must raise AssertionError."""
    with pytest.raises(AssertionError):
        Heap().top
def test_popping_from_empty_heap_gives_assertion_error():
    """pop() on an empty heap must raise AssertionError."""
    with pytest.raises(AssertionError):
        Heap().pop()
def test_heap_ordering_is_stable():
    """Items with equal priority must come back in insertion order."""
    heap = Heap()
    for value in range(1, 10):
        heap.push(str(value), 0)
    popped = [heap.pop() for _ in range(1, 10)]
    assert popped == [str(value) for value in range(1, 10)]
    assert not heap
| [
"edera.Heap",
"pytest.raises"
] | [((157, 163), 'edera.Heap', 'Heap', ([], {}), '()\n', (161, 163), False, 'from edera import Heap\n'), ((314, 320), 'edera.Heap', 'Heap', ([], {}), '()\n', (318, 320), False, 'from edera import Heap\n'), ((555, 561), 'edera.Heap', 'Heap', ([], {}), '()\n', (559, 561), False, 'from edera import Heap\n'), ((856, 862), 'edera.Heap', 'Heap', ([], {}), '()\n', (860, 862), False, 'from edera import Heap\n'), ((998, 1004), 'edera.Heap', 'Heap', ([], {}), '()\n', (1002, 1004), False, 'from edera import Heap\n'), ((1113, 1119), 'edera.Heap', 'Heap', ([], {}), '()\n', (1117, 1119), False, 'from edera import Heap\n'), ((91, 97), 'edera.Heap', 'Heap', ([], {}), '()\n', (95, 97), False, 'from edera import Heap\n'), ((872, 901), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (885, 901), False, 'import pytest\n'), ((1014, 1043), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1027, 1043), False, 'import pytest\n')] |
''' 7. Desenvolva um programa que leia as duas notas de um aluno, calcule
e mostre a sua média. '''
import sys


def _read_grade(prompt):
    """Read one grade from stdin; print a message and exit on invalid input."""
    try:
        return float(input(prompt))
    except ValueError:
        # Narrowed from the original's broad `except Exception` (whose bound
        # `error` was never used): only a non-numeric entry should trigger
        # the friendly message.
        print("Voce deve informar apenas numeros")
        sys.exit()


# The original duplicated the whole try/except block for each grade.
n1 = _read_grade("Primeira nota do aluno: ")
n2 = _read_grade("Segunda nota do aluno: ")

media = (n1 + n2) / 2
print("A média entre {:.1f} e {:.1f} é igual a {:.1f}".format(n1, n2, media))
"sys.exit"
] | [((247, 257), 'sys.exit', 'sys.exit', ([], {}), '()\n', (255, 257), False, 'import sys\n'), ((391, 401), 'sys.exit', 'sys.exit', ([], {}), '()\n', (399, 401), False, 'import sys\n')] |
import streamlit as st
# Banner image shown at the top of the page.
st.image(
    "https://datascientest.com/wp-content/uploads/2020/10/logo-text-right.png.webp"
)
st.header("Développer et déployer une application de Machine learning en **Streamlit**")
st.info("Webinar du 04/05/2021")
# Horizontal rule separating the header from the body.
st.markdown("---")
st.markdown(
"""
**Objectifs 🎯**
* Se familiariser avec Streamlit
* Découvrir les différents types de widgets
* Créér une démo d'application de Machine Learning
* Déployer cette application 🚀
"""
)
# Sidebar form: free-text identity fields.
first_name = st.sidebar.text_input("Prénom")
last_name = st.sidebar.text_input("Nom")
# Single-choice dropdown for the profession.
job = st.sidebar.selectbox(
    "Profession",
    options=("Data Scientist", "Data Engineer", "Développeur", "Autre"),
)
# Integer slider for years of experience, preset to 2.
experience = st.sidebar.slider(
    "Années d'expériences", min_value=0, max_value=10, value=2, step=1
)
# Multi-select of interests with two defaults preselected.
interests = st.sidebar.multiselect(
    "Intérêts",
    options=["technologie", "IA", "développement", "python", "statistiques", "R"],
    default=["python", "IA"],
)
| [
"streamlit.markdown",
"streamlit.image",
"streamlit.sidebar.text_input",
"streamlit.sidebar.multiselect",
"streamlit.sidebar.slider",
"streamlit.sidebar.selectbox",
"streamlit.info",
"streamlit.header"
] | [((24, 123), 'streamlit.image', 'st.image', (['"""https://datascientest.com/wp-content/uploads/2020/10/logo-text-right.png.webp"""'], {}), "(\n 'https://datascientest.com/wp-content/uploads/2020/10/logo-text-right.png.webp'\n )\n", (32, 123), True, 'import streamlit as st\n'), ((121, 219), 'streamlit.header', 'st.header', (['"""Développer et déployer une application de Machine learning en **Streamlit**"""'], {}), "(\n 'Développer et déployer une application de Machine learning en **Streamlit**'\n )\n", (130, 219), True, 'import streamlit as st\n'), ((212, 244), 'streamlit.info', 'st.info', (['"""Webinar du 04/05/2021"""'], {}), "('Webinar du 04/05/2021')\n", (219, 244), True, 'import streamlit as st\n'), ((246, 264), 'streamlit.markdown', 'st.markdown', (['"""---"""'], {}), "('---')\n", (257, 264), True, 'import streamlit as st\n'), ((266, 494), 'streamlit.markdown', 'st.markdown', (['"""\n **Objectifs 🎯** \n\n * Se familiariser avec Streamlit\n * Découvrir les différents types de widgets\n * Créér une démo d\'application de Machine Learning\n * Déployer cette application 🚀\n\n"""'], {}), '(\n """\n **Objectifs 🎯** \n\n * Se familiariser avec Streamlit\n * Découvrir les différents types de widgets\n * Créér une démo d\'application de Machine Learning\n * Déployer cette application 🚀\n\n"""\n )\n', (277, 494), True, 'import streamlit as st\n'), ((505, 536), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Prénom"""'], {}), "('Prénom')\n", (526, 536), True, 'import streamlit as st\n'), ((549, 577), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Nom"""'], {}), "('Nom')\n", (570, 577), True, 'import streamlit as st\n'), ((584, 691), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Profession"""'], {'options': "('Data Scientist', 'Data Engineer', 'Développeur', 'Autre')"}), "('Profession', options=('Data Scientist',\n 'Data Engineer', 'Développeur', 'Autre'))\n", (604, 691), True, 'import streamlit as st\n'), ((713, 803), 
'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""Années d\'expériences"""'], {'min_value': '(0)', 'max_value': '(10)', 'value': '(2)', 'step': '(1)'}), '("Années d\'expériences", min_value=0, max_value=10, value=\n 2, step=1)\n', (730, 803), True, 'import streamlit as st\n'), ((818, 961), 'streamlit.sidebar.multiselect', 'st.sidebar.multiselect', (['"""Intérêts"""'], {'options': "['technologie', 'IA', 'développement', 'python', 'statistiques', 'R']", 'default': "['python', 'IA']"}), "('Intérêts', options=['technologie', 'IA',\n 'développement', 'python', 'statistiques', 'R'], default=['python', 'IA'])\n", (840, 961), True, 'import streamlit as st\n')] |
import discord
import json
import math
from discord.ext import commands
from common_functions import default_embed_template, use_exp_mat
# Experience points granted per ore of each enhancement-material tier
# (values used by add_exp / the refund arithmetic below).
MYSTIC = 10000
FINE = 2000
NORMAL = 400
# Weapon levels at which ascension is required, ordered highest-to-lowest
# (the calculate() loop scans this list to find the next level cap).
ASCENSION_MILESTONES = [90, 80, 70, 60, 50, 40, 20]
class WeaponExpCalculator(commands.Cog):
    """Discord cog that simulates leveling a weapon with enhancement ores.

    Given a weapon's rarity, current level/exp, a goal level and an
    inventory of the three ore tiers, it feeds ores level by level
    (respecting ascension caps and ore refunds at milestones) and reports
    the outcome in a Discord embed.
    """

    def __init__(self, client):
        self._client = client
        # Invocation counter exposed via the ``calls`` property below.
        self._calls = 0

    @property
    def calls(self):
        # Number of times the wep_exp command has been invoked.
        return self._calls

    @calls.setter
    def calls(self, new_calls):
        self._calls = new_calls

    def format_char_stats(self, rarity, curr_level, curr_exp, curr_exp_cap, mystic_count, fine_count, normal_count):
        """Render a weapon + inventory summary string used in embed fields."""
        msg = f"__Weapon__\n"
        msg += f"Rarity: {rarity}:star:\n"
        msg += f"Weapon level: {curr_level}\n"
        msg += f"Current Exp: {curr_exp:,}/{curr_exp_cap:,}\n\n"
        msg += f"__Inventory__\n"
        msg += f"{mystic_count:,}x Mystic\n"
        msg += f"{fine_count:,}x Fine\n"
        msg += f"{normal_count:,}x Enhancement\n"
        return msg

    def add_exp(self, next_level_exp, curr_level, level_upto, curr_exp, mystic_count, fine_count, normal_count):
        """Consume ores to push the weapon from ``curr_level`` toward ``level_upto``.

        ``next_level_exp`` maps a level (string key) to the exp needed to reach
        the next level.  Stops when ``level_upto`` is reached or the inventory
        is exhausted.  When the level reached is an ascension milestone, exp
        overflow is converted back into Fine/Enhancement ores (refund) and any
        remainder is reported as ``wasted_exp``.

        Returns a tuple ``(level, exp, mystic, fine, normal, fine_refunded,
        normal_refunded, wasted_exp)``.
        """
        fine_ore_refunded = 0
        normal_ore_refunded = 0
        wasted_exp = 0
        while curr_level < level_upto and mystic_count + fine_count + normal_count > 0:
            total_exp_next_level = int(next_level_exp[str(curr_level)])
            # Use materials until curr_level is over total_exp_next_level starting for those that give the most exp to the least
            curr_exp, mystic_count = use_exp_mat(curr_exp, total_exp_next_level, mystic_count, MYSTIC, True)
            curr_exp, fine_count = use_exp_mat(curr_exp, total_exp_next_level, fine_count, FINE, True)
            curr_exp, normal_count = use_exp_mat(curr_exp, total_exp_next_level, normal_count, NORMAL, True)
            # Calculate exp overflow
            while True:
                if curr_exp >= int(next_level_exp[str(curr_level)]):
                    if curr_level + 1 in ASCENSION_MILESTONES:
                        # Calculate refunded ores if any
                        wasted_exp = curr_exp - int(next_level_exp[str(curr_level)])
                        fine_ore_refunded = math.floor(wasted_exp/FINE)
                        fine_count += fine_ore_refunded
                        wasted_exp -= fine_ore_refunded*FINE
                        normal_ore_refunded = math.floor(wasted_exp/NORMAL)
                        normal_count += normal_ore_refunded
                        wasted_exp -= normal_ore_refunded*NORMAL
                        curr_exp = 0
                        curr_level += 1
                        # Leveling stops at the milestone: ascension is required
                        # before any further exp can be applied.
                        return curr_level, curr_exp, mystic_count, fine_count, normal_count, fine_ore_refunded, normal_ore_refunded, wasted_exp
                    else:
                        curr_exp = curr_exp - int(next_level_exp[str(curr_level)])
                        curr_level += 1
                else:
                    break
        return curr_level, curr_exp, mystic_count, fine_count, normal_count, fine_ore_refunded, normal_ore_refunded, wasted_exp

    def calculate(self, embed_msg, rarity, curr_level, goal_level, curr_exp, mystic_count, fine_count, normal_count):
        """Validate the input, run the simulation milestone by milestone and
        fill ``embed_msg`` with before/after stats plus a summary.

        Raises :class:`commands.ArgumentParsingError` for out-of-range levels
        or an exp value above the current level's cap.
        """
        # Per-rarity exp tables are stored as JSON keyed by level.
        with open(f"./wep_exp_per_level/wep_exp_per_level_{rarity}.json", "r") as f:
            next_level_exp = json.load(f)
            f.close()
        if not str(curr_level) in next_level_exp:
            raise commands.ArgumentParsingError(message="Please enter a valid weapon level.")
        if not str(goal_level) in next_level_exp:
            raise commands.ArgumentParsingError(message="Please enter a valid goal level.")
        if curr_exp > int(next_level_exp[str(curr_level)]):
            raise commands.ArgumentParsingError(message="Invalid current weapon experience points value.")
        msg = self.format_char_stats(rarity, curr_level, curr_exp, next_level_exp[str(curr_level)], mystic_count, fine_count, normal_count)
        embed_msg.add_field(name="**Before**", value=msg, inline=True)
        start_mystic_count = mystic_count
        start_fine_count = fine_count
        start_normal_count = normal_count
        total_fine_refunded = 0
        total_normal_refunded = 0
        while mystic_count + fine_count + normal_count > 0 and curr_level < goal_level:
            prev_mystic_count = mystic_count
            prev_fine_count = fine_count
            prev_normal_count = normal_count
            # Find the next ascension cap above the current level
            # (ASCENSION_MILESTONES is ordered highest-to-lowest).
            curr_upper_level_cap = 0
            for level_cap in ASCENSION_MILESTONES:
                if level_cap > curr_level:
                    curr_upper_level_cap = level_cap
                else:
                    break
            level_upto = curr_upper_level_cap
            if goal_level < curr_upper_level_cap:
                level_upto = goal_level
            # Add exp
            new_level, new_exp, mystic_count, fine_count, normal_count, fine_ore_refunded, normal_ore_refunded, wasted_exp = self.add_exp(next_level_exp, curr_level, level_upto, curr_exp, mystic_count, fine_count, normal_count)
            total_fine_refunded += fine_ore_refunded
            total_normal_refunded += normal_ore_refunded
            embed_msg.add_field(name=f"**Leveling: {curr_level} -> {level_upto}**", value=f"Reached level {new_level:,}/{curr_upper_level_cap:,}\nCurrent exp: {new_exp:,}/{next_level_exp[str(new_level)]:,}", inline=True)
            embed_msg.add_field(name=f"**Used**", value=f"{prev_mystic_count - mystic_count}x Mystic\n{prev_fine_count - fine_count +fine_ore_refunded}x Fine\n{prev_normal_count - normal_count + normal_ore_refunded}x Enhancement", inline=True)
            embed_msg.add_field(name=f"**Refunded**", value=f"{fine_ore_refunded}x Fine\n{normal_ore_refunded}x Enhancement", inline=True)
            curr_level = new_level
            curr_exp = new_exp
        msg = self.format_char_stats(rarity, curr_level, curr_exp, next_level_exp[str(curr_level)], mystic_count, fine_count, normal_count)
        embed_msg.insert_field_at(index=1, name="**After**", value=msg, inline=True)
        if curr_level >= goal_level:
            msg = f"You have enough enhancement ores to reach level {goal_level}.\n\n"
        else:
            msg = f"You do not have enough enhancement ores to reach level {goal_level}.\n\n"
        msg += f"__Total used__\n{start_mystic_count - mystic_count}x Mystic\n{start_fine_count - fine_count + total_fine_refunded}x Fine\n{start_normal_count - normal_count + total_normal_refunded}x Enhancement\n\n"
        msg += f"__Total refunded__\n{total_fine_refunded}x Fine\n{total_normal_refunded}x Enhancement\n"
        embed_msg.insert_field_at(index=2, name="**Summary**", value=msg, inline=False)
        return embed_msg

    # Input:
    # current level, goal level, current exp, and number of mystic, fine and regular ores.
    # Output:
    # If enough then how many ores it will cost.
    # If not enough then what level will using all of the ores will get to and how many more ores needed to reach goal.
    @commands.command()
    async def wep_exp(self, ctx):
        """Command entry point: parses the 7 numeric arguments and replies
        with the embed produced by :meth:`calculate`."""
        self.calls += 1
        args = ctx.message.content.split()
        if len(args) == 8:
            try:
                rarity = int(args[1])
                curr_level = int(args[2])
                goal_level = int(args[3])
                curr_exp = int(args[4])
                mystic_count = int(args[5])
                fine_count = int(args[6])
                normal_count = int(args[7])
            except ValueError:
                raise commands.ArgumentParsingError(message="Please enter integer values only.")
            if not rarity in [5, 4, 3, 2, 1]:
                raise commands.ArgumentParsingError(message="Please enter a valid weapon rarity value.")
            if curr_level > goal_level:
                raise commands.ArgumentParsingError(message="Please enter current level and goal level where current level is less than goal level.")
            if mystic_count < 0 or fine_count < 0 or normal_count < 0:
                raise commands.ArgumentParsingError(message="Please enter number of enhancement ores greater or equal to 0.")
            embed_msg = default_embed_template(ctx, self._client.user.name)
            embed_msg = self.calculate(embed_msg, rarity, curr_level, goal_level, curr_exp, mystic_count, fine_count, normal_count)
            await ctx.send(embed=embed_msg)
        else:
            await ctx.send(f"`Usage: {self._client.command_prefix}wep_exp <rarity> <curr_level> <goal_level> <curr_exp> <mystic_count> <fine_count> <normal_count>`\n`{self._client.command_prefix}help` for more details.")

    @commands.command(hidden=True)
    @commands.is_owner()
    async def wep_exp_calls(self, ctx):
        """Owner-only diagnostics: report how many times wep_exp was called."""
        await ctx.send(content=f"Calls: {self.calls}")
def setup(client):
    """discord.py extension entry point: registers the cog on the bot."""
    client.add_cog(WeaponExpCalculator(client))
"discord.ext.commands.ArgumentParsingError",
"common_functions.default_embed_template",
"math.floor",
"discord.ext.commands.is_owner",
"common_functions.use_exp_mat",
"json.load",
"discord.ext.commands.command"
] | [((6129, 6147), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (6145, 6147), False, 'from discord.ext import commands\n'), ((7508, 7537), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)'}), '(hidden=True)\n', (7524, 7537), False, 'from discord.ext import commands\n'), ((7540, 7559), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (7557, 7559), False, 'from discord.ext import commands\n'), ((1375, 1446), 'common_functions.use_exp_mat', 'use_exp_mat', (['curr_exp', 'total_exp_next_level', 'mystic_count', 'MYSTIC', '(True)'], {}), '(curr_exp, total_exp_next_level, mystic_count, MYSTIC, True)\n', (1386, 1446), False, 'from common_functions import default_embed_template, use_exp_mat\n'), ((1473, 1540), 'common_functions.use_exp_mat', 'use_exp_mat', (['curr_exp', 'total_exp_next_level', 'fine_count', 'FINE', '(True)'], {}), '(curr_exp, total_exp_next_level, fine_count, FINE, True)\n', (1484, 1540), False, 'from common_functions import default_embed_template, use_exp_mat\n'), ((1569, 1640), 'common_functions.use_exp_mat', 'use_exp_mat', (['curr_exp', 'total_exp_next_level', 'normal_count', 'NORMAL', '(True)'], {}), '(curr_exp, total_exp_next_level, normal_count, NORMAL, True)\n', (1580, 1640), False, 'from common_functions import default_embed_template, use_exp_mat\n'), ((2805, 2817), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2814, 2817), False, 'import json\n'), ((2884, 2959), 'discord.ext.commands.ArgumentParsingError', 'commands.ArgumentParsingError', ([], {'message': '"""Please enter a valid weapon level."""'}), "(message='Please enter a valid weapon level.')\n", (2913, 2959), False, 'from discord.ext import commands\n'), ((3014, 3087), 'discord.ext.commands.ArgumentParsingError', 'commands.ArgumentParsingError', ([], {'message': '"""Please enter a valid goal level."""'}), "(message='Please enter a valid goal level.')\n", (3043, 3087), False, 'from discord.ext import commands\n'), 
((3152, 3245), 'discord.ext.commands.ArgumentParsingError', 'commands.ArgumentParsingError', ([], {'message': '"""Invalid current weapon experience points value."""'}), "(message=\n 'Invalid current weapon experience points value.')\n", (3181, 3245), False, 'from discord.ext import commands\n'), ((7074, 7125), 'common_functions.default_embed_template', 'default_embed_template', (['ctx', 'self._client.user.name'], {}), '(ctx, self._client.user.name)\n', (7096, 7125), False, 'from common_functions import default_embed_template, use_exp_mat\n'), ((6628, 6715), 'discord.ext.commands.ArgumentParsingError', 'commands.ArgumentParsingError', ([], {'message': '"""Please enter a valid weapon rarity value."""'}), "(message=\n 'Please enter a valid weapon rarity value.')\n", (6657, 6715), False, 'from discord.ext import commands\n'), ((6753, 6890), 'discord.ext.commands.ArgumentParsingError', 'commands.ArgumentParsingError', ([], {'message': '"""Please enter current level and goal level where current level is less than goal level."""'}), "(message=\n 'Please enter current level and goal level where current level is less than goal level.'\n )\n", (6782, 6890), False, 'from discord.ext import commands\n'), ((6954, 7062), 'discord.ext.commands.ArgumentParsingError', 'commands.ArgumentParsingError', ([], {'message': '"""Please enter number of enhancement ores greater or equal to 0."""'}), "(message=\n 'Please enter number of enhancement ores greater or equal to 0.')\n", (6983, 7062), False, 'from discord.ext import commands\n'), ((6505, 6579), 'discord.ext.commands.ArgumentParsingError', 'commands.ArgumentParsingError', ([], {'message': '"""Please enter integer values only."""'}), "(message='Please enter integer values only.')\n", (6534, 6579), False, 'from discord.ext import commands\n'), ((1922, 1951), 'math.floor', 'math.floor', (['(wasted_exp / FINE)'], {}), '(wasted_exp / FINE)\n', (1932, 1951), False, 'import math\n'), ((2060, 2091), 'math.floor', 'math.floor', 
(['(wasted_exp / NORMAL)'], {}), '(wasted_exp / NORMAL)\n', (2070, 2091), False, 'import math\n')] |
import datetime
from cerberus import Validator as _Validator
from sqlalchemy import inspect
from sqlalchemy.orm.collections import InstrumentedList
from sqlalchemy.exc import NoInspectionAvailable
def is_sqla_obj(obj):
    """Tell whether *obj* is an instance of a mapped SQLAlchemy model."""
    try:
        inspect(obj)
    except NoInspectionAvailable:
        return False
    return True
def import_into_sqla_object(model_instance, data):
    """Copy matching column values from *data* onto *model_instance*.

    Only the keys of *data* that correspond to mapped column names are
    applied; everything else is silently ignored.  No validation is
    performed on the incoming values.

    :param model_instance: A SQLAlchemy model instance.
    :param data: A python dictionary.
    :returns: The same *model_instance*, for chaining.
    """
    columns = inspect(model_instance.__class__).c
    for key, value in data.items():
        if key in columns:
            setattr(model_instance, key, value)
    return model_instance
def _get_column_default(c):
    """Return the column's scalar default (int/str/bool) or None.

    Callable and server-side defaults are deliberately ignored.
    """
    default_arg = getattr(c.default, "arg", None)
    if isinstance(default_arg, (int, str, bool)):
        return default_arg
    return None
class ExportData:
    """ Creates a callable object that converts SQLAlchemy model instances
    to dictionaries.
    """

    def __init__(self, exclude=()):
        #: A global list of column names to exclude. This takes precedence over
        #: the parameters ``include`` and/or ``exclude`` of this instance call.
        self.exclude = tuple(exclude)

    def __call__(self, obj, include=(), exclude=()):
        """Converts SQLAlchemy models into python serializable objects. It can
        take a single model or a list of models.

        By default, all columns are included in the output, unless a list of
        column names are provided to the parameters ``include`` or ``exclude``.
        The latter has precedence over the former. Finally, the columns that
        appear in the :attr:`exclude` property will be excluded, regardless of
        the values that the parameters include and exclude have.

        If the model is not persisted in the database, the default values of
        the columns are used if they exist in the class definition. From the
        example below, the value False will be used for the column active::

            active = Column(Boolean, default=False)

        :param obj: A instance or a list of SQLAlchemy model instances.
        :param include: tuple, list or set.
        :param exclude: tuple, list or set.
        """
        if isinstance(obj, (list, InstrumentedList)):
            # Prefer each item's own export_data() hook when it defines one.
            try:
                return [item.export_data(include, exclude) for item in obj]
            except AttributeError as e:
                # If the method exist, the exception comes inside of it.
                if hasattr(obj[0], "export_data"):
                    # So re-raise the exception.
                    raise e
                return [self(item, include, exclude) for item in obj]
        try:
            # persistent == the instance is present in the database.
            persisted = inspect(obj).persistent
        except NoInspectionAvailable as e:
            raise ValueError("Pass a valid SQLAlchemy mapped class instance")
        columns = obj.__mapper__.columns
        # Instance-level exclusions are appended to the call-level ones.
        exclude = tuple(exclude) + self.exclude
        data = {}
        for c in columns:
            name = c.name
            if (not include or name in include) and name not in exclude:
                column_value = getattr(obj, name)
                # For a non-persisted instance with a None value, fall back to
                # the column's declared scalar default.
                data[name] = (
                    column_value
                    if persisted
                    else _get_column_default(c)
                    if column_value is None
                    else column_value
                )
        if persisted is True:
            # Only already-loaded relationships are exported, so exporting
            # never triggers additional lazy loads.
            unloaded_relationships = inspect(obj).unloaded
            relationship_keys = [
                relationship.key
                for relationship in obj.__class__.__mapper__.relationships
            ]
            for key in relationship_keys:
                if key not in unloaded_relationships and key not in exclude:
                    rproperty = getattr(obj, key)
                    has_export_data = hasattr(rproperty, "export_data")
                    data[key] = None
                    if has_export_data:
                        data[key] = rproperty.export_data()
                    elif rproperty:
                        data[key] = self(rproperty)
        return data
#: Converts SQLAlchemy models into python serializable objects.
#:
#: This is an instance of :class:`ExportData` so head on to the
#: :meth:`~ExportData.__call__` method to know how this works. This instance
#: globally removes columns named ``org_id``.
export_from_sqla_object = ExportData(exclude=("org_id",))

# Maps a column's python type to the Cerberus ``type`` rule name used by
# generate_schema(); date/datetime columns are declared as plain strings.
schema_type_conversions = {
    int: "integer",
    str: "string",
    bool: "boolean",
    datetime.date: "string",
    datetime.datetime: "string",
}
def generate_schema(model_class, include=(), exclude=(), exclude_rules=None):
    """ Inspects a SQLAlchemy model class and returns a validation schema to be
    used with the Cerberus library. The schema is generated mapping column
    types and constraints to Cerberus rules:

    +---------------+------------------------------------------------------+
    | Cerberus Rule | Based on                                             |
    +===============+======================================================+
    | type          | SQLAlchemy column class used (String, Integer, etc). |
    +---------------+------------------------------------------------------+
    | maxlength     | ``Column.type.length`` for string columns.           |
    +---------------+------------------------------------------------------+
    | readonly      | **True** if the column is primary key.               |
    +---------------+------------------------------------------------------+
    | required      | **True** if ``Column.nullable`` is **False** or      |
    |               | ``Column.default`` and ``Column.server_default``     |
    |               | **None**.                                            |
    +---------------+------------------------------------------------------+
    | unique        | Included only when the ``unique`` constraint is      |
    |               | ``True``, otherwise is omitted:                      |
    |               | ``Column(unique=True)``                              |
    +---------------+------------------------------------------------------+
    | default       | Not included in the output. This is handled by       |
    |               | SQLAlchemy or by the database engine.                |
    +---------------+------------------------------------------------------+

    :param model_class: SQLAlchemy model class.
    :param include: List of columns to include in the output.
    :param exclude: List of column to exclude from the output.
    :param exclude_rules: Rules to be excluded from the output.
    :raises LookupError: If a column's python type has no Cerberus mapping.
    """
    schema = {}
    exclude_rules = exclude_rules or []
    mapper = inspect(model_class)
    for column in mapper.c:
        name = column.name
        if include and name not in include:
            continue
        if name in exclude:
            continue
        prop = {}
        python_type = column.type.python_type
        prop["type"] = schema_type_conversions.get(python_type)
        if prop["type"] is None:
            raise LookupError("Unable to determine the column type")
        # BUGFIX: this guard previously tested ``"readonly" not in
        # exclude_rules`` (copy/paste from the block below), which tied the
        # maxlength rule to the readonly exclusion and made "maxlength"
        # itself impossible to exclude.
        if (
            "maxlength" not in exclude_rules
            and python_type == str
            and column.type.length is not None
        ):
            prop["maxlength"] = column.type.length
        if "readonly" not in exclude_rules and column.primary_key is True:
            prop["readonly"] = True
        if (
            "required" not in exclude_rules
            and column.default is None
            and column.server_default is None
            and column.nullable is False
            and column.primary_key is False
        ):
            prop["required"] = True
        if "unique" not in exclude_rules and column.unique:
            prop["unique"] = True
        schema[name] = prop
    return schema
class Validator(_Validator):
    # Cerberus Validator extended with a database-backed ``unique`` rule.

    def __init__(self, schema, model_class=None, **kwargs):
        # model_class: SQLAlchemy declarative class used for uniqueness
        # queries; required only when the schema uses the ``unique`` rule.
        super(Validator, self).__init__(schema, **kwargs)
        self.model_class = model_class

    def validate(self, document, model=None, **kwargs):
        # ``model`` is the instance being updated (if any); stored so the
        # unique check can ignore a match against the record itself.
        self.model = model
        return super(Validator, self).validate(document, **kwargs)

    def _validate_unique(self, is_unique, field, value):
        """Performs a query to the database to check value is already present
        in a given column.

        The rule's arguments are validated against this schema:
        {'type': 'boolean'}
        """
        if is_unique:
            if not self.model_class:
                raise RuntimeError(
                    "The rule `unique` needs a SQLAlchemy declarative class"
                    " to perform queries to check if the value being validated"
                    " is unique. Provide a class in Validator constructor."
                )
            filters = {field: value}
            model = self.model_class.query.filter_by(**filters).first()
            # On updates, a hit that is the record being edited is not a
            # uniqueness violation.
            if model and (not self.update or model is not self.model):
                self._error(field, f"Must be unique, but '{value}' already exist")
def get_key_path(key, _map):
    """Depth-first search for *key* inside a nested dict.

    Returns the list of keys leading from the top level down to *key*
    (inclusive), or ``None`` when the key is absent.
    """
    for candidate, value in _map.items():
        if candidate == key:
            return [candidate]
        if type(value) == dict:
            tail = get_key_path(key, value)
            if tail:
                return [candidate] + tail
    return None
| [
"sqlalchemy.inspect"
] | [((809, 842), 'sqlalchemy.inspect', 'inspect', (['model_instance.__class__'], {}), '(model_instance.__class__)\n', (816, 842), False, 'from sqlalchemy import inspect\n'), ((6844, 6864), 'sqlalchemy.inspect', 'inspect', (['model_class'], {}), '(model_class)\n', (6851, 6864), False, 'from sqlalchemy import inspect\n'), ((301, 313), 'sqlalchemy.inspect', 'inspect', (['obj'], {}), '(obj)\n', (308, 313), False, 'from sqlalchemy import inspect\n'), ((2980, 2992), 'sqlalchemy.inspect', 'inspect', (['obj'], {}), '(obj)\n', (2987, 2992), False, 'from sqlalchemy import inspect\n'), ((3724, 3736), 'sqlalchemy.inspect', 'inspect', (['obj'], {}), '(obj)\n', (3731, 3736), False, 'from sqlalchemy import inspect\n')] |
import collections
import datetime
import logging
import os
import sys
from pathlib import Path
import numpy as np
import pdfkit as pdfkit
from bs4 import BeautifulSoup
from sklearn.metrics import mean_absolute_error, mean_squared_error, confusion_matrix, classification_report, \
accuracy_score
from tldextract import tldextract
from sklearn.externals import joblib
from coffeeandnoodles.core.util import get_md5_from_string
from trustworthiness.config import DeFactoConfig
from trustworthiness.definitions import DATASET_3C_SITES_PATH, DATASET_MICROSOFT_PATH_PAGES_MISSING, \
DATASET_MICROSOFT_PATH_PAGES_CACHED, ENC_WEB_DOMAIN, ENC_WEB_DOMAIN_SUFFIX, DATASET_MICROSOFT_PATH, OUTPUT_FOLDER, \
ENC_TAGS
import re
# Module-wide project configuration; helpers below use config.logger,
# config.datasets and config.root_dir_data.
config = DeFactoConfig()
def filterTerm(word):
    """Lower-case *word* and drop every character that is not an ASCII letter.

    A ``None`` input yields the empty string.
    """
    if word is None:
        return ''
    return re.sub(r"[^A-Za-z]+", '', word.lower())
def print_report_regression(clf_name, predictions, y_test, targets):
    """Print MAE and RMSE for a regression model's predictions.

    :param clf_name: Name of the regressor (printed for consistency with
        :func:`print_report`).
    :param predictions: Predicted values.
    :param y_test: Ground-truth values.
    :param targets: Unused; kept for signature parity with :func:`print_report`.
    """
    print("Regressor: ", clf_name)
    print('MAE', mean_absolute_error(y_test, predictions))
    # ``** 0.5`` replaces ``np.math.sqrt``: the ``np.math`` alias was
    # deprecated and removed in NumPy 2.0, and ``math`` is not imported here.
    print('RMSE', mean_squared_error(y_test, predictions) ** 0.5)
    print("-----------------------------------------------------------------------")
def print_report(clf_name, predictions, y_test, targets):
    """Print confusion matrix, accuracy and a per-class classification report.

    :param clf_name: Name of the classifier being reported.
    :param predictions: Predicted labels.
    :param y_test: Ground-truth labels.
    :param targets: Class display names passed to classification_report.
    """
    print("Classifier: ", clf_name)
    print(confusion_matrix(y_test, predictions))
    print("accuracy: ", accuracy_score(y_test, predictions))
    print(classification_report(y_test, predictions, target_names=targets))
    # print(":: recall: ", recall_score(y_test, predictions, average='weighted'))
    # print(":: precision: ", precision_score(y_test, predictions, average='weighted'))
    # print(":: f1: ", f1_score(y_test, predictions, average='weighted'))
    print("-----------------------------------------------------------------------")
def get_logger(name, dir, file_level=logging.DEBUG, console_level=logging.INFO):
    """Return a named logger writing to both a dated file and stdout.

    The log file is created as ``<dir><name>_YYYY-MM-DD.log``.  Handlers
    are attached only once per logger name, so repeated calls return the
    same already-configured logger instance.

    :param name: Logger name, also embedded in the log file name.
    :param dir: Directory prefix for the log file (parameter name kept for
        backward compatibility even though it shadows the ``dir`` builtin).
    :param file_level: Threshold for the file handler.
    :param console_level: Threshold for the console handler.
    """
    # The original wrapped this body in ``try: ... except: raise`` — a
    # no-op that only obscured tracebacks — so it has been removed.
    logger = logging.getLogger(name)
    if not logger.handlers:
        now = datetime.datetime.now()
        filename = dir + name + '_' + now.strftime("%Y-%m-%d") + '.log'
        formatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
        fileHandler = logging.FileHandler(filename)
        fileHandler.setFormatter(formatter)
        fileHandler.setLevel(file_level)
        consoleHandler = logging.StreamHandler(sys.stdout)
        consoleHandler.setFormatter(formatter)
        consoleHandler.setLevel(console_level)
        logger.setLevel(logging.DEBUG)
        logger.addHandler(fileHandler)
        logger.addHandler(consoleHandler)
        # Prevent duplicate records from propagating to the root logger.
        logger.propagate = False
    return logger
def get_html_file_path(url):
    """Map a dataset URL to the locally cached HTML file for that page.

    Several candidate locations are probed in order: the cached and the
    "missing" page folders (appending ``index.html`` when the URL does not
    already end in an HTML-like file name), then — as a last resort — the
    URL's final path segment with ``.html``/``.htm``/``.shtml`` appended.

    :param url: Page URL starting with ``http://``.
    :returns: A :class:`pathlib.Path` to an existing file.
    :raises Exception: When no candidate file exists on disk.
    """
    path = url.replace('http://', '')
    last = path.split('/')[-1]
    path_root = None
    if ('.html' not in last) and ('.htm' not in last) and ('.shtml' not in last):
        # URL points at a directory: normalise the trailing slash and look
        # for its index.html in both mirror folders.
        if path[-1] != '/':
            path = path + '/'
        path_root1 = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path + 'index.html')
        path_root2 = Path(DATASET_MICROSOFT_PATH_PAGES_MISSING + path + 'index.html')
    else:
        path_root1 = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path)
        path_root2 = Path(DATASET_MICROSOFT_PATH_PAGES_MISSING + path)
    if path_root1.exists():
        path_root = path_root1
    elif path_root2.exists():
        path_root = path_root2
    else:
        # sometimes the last part is not a folder, but the file itself without the ".html" , try it as a last attempt
        path_root3a = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path.replace(last, '') + last + '.html')
        path_root3b = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path.replace(last, '') + last + '.htm')
        path_root3c = Path(DATASET_MICROSOFT_PATH_PAGES_CACHED + path.replace(last, '') + last + '.shtml')
        if path_root3a.exists():
            path_root = path_root3a
        elif path_root3b.exists():
            path_root = path_root3b
        elif path_root3c.exists():
            path_root = path_root3c
        else:
            # url_broken.append(url)
            raise Exception(
                ':: this should not happen, double check core/web/credibility/fix_dataset_microsoft.py | url = ' + url)
    return path_root
def save_encoder_html2seq(folder_html_data):
    """Build and persist a LabelEncoder over all HTML tags found in a folder.

    Walks ``folder_html_data`` recursively, parses every ``.txt`` file as
    HTML with BeautifulSoup, collects the set of opening/closing tag tokens
    and fits a ``sklearn`` LabelEncoder on that vocabulary, which is then
    dumped to the ``ENC_TAGS`` path with joblib.

    :param folder_html_data: Root folder containing the HTML dumps
        (stored with a ``.txt`` extension).
    """
    from sklearn import preprocessing
    le = preprocessing.LabelEncoder()
    config.logger.info('get_encoder_html2seq()')
    try:
        tags_set = []
        #sentences = []
        tot_files = 0
        #my_file = Path(folder_html_data + 'features.html2seq.pkl')
        my_encoder = Path(ENC_TAGS)
        #path_html2seq = folder_html_data + 'html2seq/'
        #path_html = folder_html_data + 'html/'
        #path_text = folder_html_data + 'text/'
        for dirpath, dirs, files in os.walk(folder_html_data):
            for file_html in files:
                if file_html.endswith('.txt'):
                    tot_files += 1
                    config.logger.info('processing file ' + str(tot_files) + ' - ' + str(len(tags_set)))
                    # get tags
                    tags = []
                    soup = BeautifulSoup(open(os.path.join(dirpath, file_html)), "html.parser")
                    html = soup.prettify()
                    # prettify() puts each tag on its own line; scan line by
                    # line for tag tokens, skipping '<!...' declarations.
                    for line in html.split('\n'):
                        if isinstance(line, str) and len(line.strip()) > 0:
                            if (line.strip()[0] == '<') and (line.strip()[0:2] != '<!'):
                                if len(line.split()) > 1:
                                    # Tag with attributes: keep only the tag
                                    # name and re-close the bracket.
                                    tags.append(line.split()[0] + '>')
                                else:
                                    tags.append(line.split()[0])
                            elif (line.strip()[0:2] == '</' and line.strip()[0:2] != '<!'):
                                tags.append(line.split()[0])
                    if len(tags) > 0:
                        #sentences.append(tags)
                        tags_set.extend(tags)
                        # Deduplicate as we go to keep the list small.
                        tags_set = list(set(tags_set))
                    else:
                        config.logger.info('no tags for this file...')
        config.logger.info('saving dump')
        le.fit(tags_set)
        joblib.dump(le, str(my_encoder))
        config.logger.info('tot files: ' + str(tot_files))
        config.logger.info('dictionary size: ' + str(len(tags_set)))
    except Exception as e:
        config.logger.error(repr(e))
        raise
def save_encoder_domain_and_suffix():
    """Fit and persist LabelEncoders for web domains and domain suffixes.

    Domains/suffixes are collected from three sources: the 3C sites dataset,
    the IANA top-level-domain list, and the Microsoft dataset.  The fitted
    encoders are dumped with joblib to ``ENC_WEB_DOMAIN`` and
    ``ENC_WEB_DOMAIN_SUFFIX``.
    """
    import pandas as pd
    from sklearn import preprocessing
    le1 = preprocessing.LabelEncoder()
    le2 = preprocessing.LabelEncoder()
    # BUGFIX: a dead ``domain_s = ['com']`` assignment that was immediately
    # overwritten has been removed.  Both vocabularies start with '' so that
    # unknown/empty values have an encoding.
    domain_s = ['']
    domain = ['']
    df_sites = pd.read_csv(DATASET_3C_SITES_PATH, na_values=0, delimiter=',', usecols=['document_url'])
    for index, row in df_sites.iterrows():
        url = str(row[0])
        print(index, url)
        try:
            o = tldextract.extract(url)
            if o.suffix is not None:
                domain_s.append(str(o.suffix).lower())
            if o.domain is not None:
                domain.append(str(o.domain).lower())
        except:
            # Best-effort collection: skip URLs tldextract cannot parse.
            continue
    # appending upper level domains, from http://data.iana.org/TLD/tlds-alpha-by-domain.txt
    # Version 2018040300, Last Updated Tue Apr 3 07:07:01 2018 UTC
    df = pd.read_csv(config.datasets + 'data/iana/org/TLD/tlds-alpha-by-domain.txt', sep=" ", header=None)
    for index, row in df.iterrows():
        print(index, row[0])
        domain.append(str(row[0]).lower())
    df = pd.read_csv(DATASET_MICROSOFT_PATH, delimiter='\t', header=0)
    for index, row in df.iterrows():
        url = str(row[3])
        print(index, url)
        try:
            o = tldextract.extract(url)
            if o.suffix is not None:
                domain_s.append(str(o.suffix).lower())
            if o.domain is not None:
                domain.append(str(o.domain).lower())
        except:
            continue
    le1.fit(domain)
    joblib.dump(le1, ENC_WEB_DOMAIN)
    print(le1.classes_)
    le2.fit(domain_s)
    joblib.dump(le2, ENC_WEB_DOMAIN_SUFFIX)
    print(le2.classes_)
def diff_month(d1, d2):
    """Signed number of calendar months from *d2* to *d1* (days are ignored)."""
    year_gap = d1.year - d2.year
    month_gap = d1.month - d2.month
    return 12 * year_gap + month_gap
def save_url_body(extractor):
    """Dump the page body text of *extractor* to the marseille input folder.

    The output file name is the MD5 of the extractor's local file path.
    Any failure is logged and re-raised.

    :param extractor: Object exposing ``url``, ``local_file_path`` and a
        ``webscrap.get_body()`` accessor.
    """
    try:
        config.logger.info('extracting features for: ' + extractor.url)
        # NOTE: ``hash`` shadows the builtin; kept for byte-compatibility.
        hash = get_md5_from_string(extractor.local_file_path)
        text=extractor.webscrap.get_body()
        with open(config.root_dir_data + 'marseille/input/' + hash + '.txt', "w") as file:
            file.write(text)
    except Exception as e:
        config.logger.error(repr(e))
        raise
if __name__ == '__main__':
    # Script entry point: regenerate the domain/suffix label encoders.
    save_encoder_domain_and_suffix()
    # save_encoder_html2seq('/Users/diegoesteves/DropDrive/CloudStation/experiments_cache/web_credibility/output/all_html/') # just copy and paste all html files into a single temp file to generate this.
"logging.getLogger",
"sklearn.preprocessing.LabelEncoder",
"logging.StreamHandler",
"pandas.read_csv",
"sklearn.metrics.classification_report",
"trustworthiness.config.DeFactoConfig",
"os.walk",
"tldextract.tldextract.extract",
"pathlib.Path",
"logging.FileHandler",
"sklearn.metrics.mean_absolut... | [((739, 754), 'trustworthiness.config.DeFactoConfig', 'DeFactoConfig', ([], {}), '()\n', (752, 754), False, 'from trustworthiness.config import DeFactoConfig\n'), ((4423, 4451), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (4449, 4451), False, 'from sklearn import preprocessing\n'), ((6639, 6667), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (6665, 6667), False, 'from sklearn import preprocessing\n'), ((6678, 6706), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (6704, 6706), False, 'from sklearn import preprocessing\n'), ((6785, 6878), 'pandas.read_csv', 'pd.read_csv', (['DATASET_3C_SITES_PATH'], {'na_values': '(0)', 'delimiter': '""","""', 'usecols': "['document_url']"}), "(DATASET_3C_SITES_PATH, na_values=0, delimiter=',', usecols=[\n 'document_url'])\n", (6796, 6878), True, 'import pandas as pd\n'), ((7411, 7512), 'pandas.read_csv', 'pd.read_csv', (["(config.datasets + 'data/iana/org/TLD/tlds-alpha-by-domain.txt')"], {'sep': '""" """', 'header': 'None'}), "(config.datasets + 'data/iana/org/TLD/tlds-alpha-by-domain.txt',\n sep=' ', header=None)\n", (7422, 7512), True, 'import pandas as pd\n'), ((7628, 7689), 'pandas.read_csv', 'pd.read_csv', (['DATASET_MICROSOFT_PATH'], {'delimiter': '"""\t"""', 'header': '(0)'}), "(DATASET_MICROSOFT_PATH, delimiter='\\t', header=0)\n", (7639, 7689), True, 'import pandas as pd\n'), ((8077, 8109), 'sklearn.externals.joblib.dump', 'joblib.dump', (['le1', 'ENC_WEB_DOMAIN'], {}), '(le1, ENC_WEB_DOMAIN)\n', (8088, 8109), False, 'from sklearn.externals import joblib\n'), ((8161, 8200), 'sklearn.externals.joblib.dump', 'joblib.dump', (['le2', 'ENC_WEB_DOMAIN_SUFFIX'], {}), '(le2, ENC_WEB_DOMAIN_SUFFIX)\n', (8172, 8200), False, 'from sklearn.externals import joblib\n'), ((847, 877), 're.sub', 're.sub', (['"""[^A-Za-z]+"""', '""""""', 'temp'], {}), "('[^A-Za-z]+', '', temp)\n", (853, 
877), False, 'import re\n'), ((994, 1034), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (1013, 1034), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error, confusion_matrix, classification_report, accuracy_score\n'), ((1300, 1337), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (1316, 1337), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error, confusion_matrix, classification_report, accuracy_score\n'), ((1363, 1398), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (1377, 1398), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error, confusion_matrix, classification_report, accuracy_score\n'), ((1410, 1474), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'predictions'], {'target_names': 'targets'}), '(y_test, predictions, target_names=targets)\n', (1431, 1474), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error, confusion_matrix, classification_report, accuracy_score\n'), ((1914, 1937), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (1931, 1937), False, 'import logging\n'), ((3027, 3090), 'pathlib.Path', 'Path', (["(DATASET_MICROSOFT_PATH_PAGES_CACHED + path + 'index.html')"], {}), "(DATASET_MICROSOFT_PATH_PAGES_CACHED + path + 'index.html')\n", (3031, 3090), False, 'from pathlib import Path\n'), ((3112, 3176), 'pathlib.Path', 'Path', (["(DATASET_MICROSOFT_PATH_PAGES_MISSING + path + 'index.html')"], {}), "(DATASET_MICROSOFT_PATH_PAGES_MISSING + path + 'index.html')\n", (3116, 3176), False, 'from pathlib import Path\n'), ((3208, 3256), 'pathlib.Path', 'Path', (['(DATASET_MICROSOFT_PATH_PAGES_CACHED + path)'], {}), '(DATASET_MICROSOFT_PATH_PAGES_CACHED + path)\n', (3212, 3256), False, 'from pathlib import Path\n'), 
((3278, 3327), 'pathlib.Path', 'Path', (['(DATASET_MICROSOFT_PATH_PAGES_MISSING + path)'], {}), '(DATASET_MICROSOFT_PATH_PAGES_MISSING + path)\n', (3282, 3327), False, 'from pathlib import Path\n'), ((4669, 4683), 'pathlib.Path', 'Path', (['ENC_TAGS'], {}), '(ENC_TAGS)\n', (4673, 4683), False, 'from pathlib import Path\n'), ((4873, 4898), 'os.walk', 'os.walk', (['folder_html_data'], {}), '(folder_html_data)\n', (4880, 4898), False, 'import os\n'), ((8435, 8481), 'coffeeandnoodles.core.util.get_md5_from_string', 'get_md5_from_string', (['extractor.local_file_path'], {}), '(extractor.local_file_path)\n', (8454, 8481), False, 'from coffeeandnoodles.core.util import get_md5_from_string\n'), ((1067, 1106), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (1085, 1106), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error, confusion_matrix, classification_report, accuracy_score\n'), ((1994, 2017), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2015, 2017), False, 'import datetime\n'), ((2119, 2212), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s"""'], {}), "(\n '%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s')\n", (2136, 2212), False, 'import logging\n'), ((2235, 2264), 'logging.FileHandler', 'logging.FileHandler', (['filename'], {}), '(filename)\n', (2254, 2264), False, 'import logging\n'), ((2388, 2421), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (2409, 2421), False, 'import logging\n'), ((6998, 7021), 'tldextract.tldextract.extract', 'tldextract.extract', (['url'], {}), '(url)\n', (7016, 7021), False, 'from tldextract import tldextract\n'), ((7808, 7831), 'tldextract.tldextract.extract', 'tldextract.extract', (['url'], {}), '(url)\n', (7826, 7831), False, 'from tldextract import tldextract\n'), ((5230, 5262), 
'os.path.join', 'os.path.join', (['dirpath', 'file_html'], {}), '(dirpath, file_html)\n', (5242, 5262), False, 'import os\n')] |
#!/usr/bin/python
#The MIT License (MIT)
#
#Copyright (c) 2017 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
'''
Class which defines interaction with the MAX31856 sensor.
library based on
johnrbnsn/Adafruit_Python_MAX31856
steve71/MAX31856
modified for pycom LoPy by <NAME>
'''
import math
from machine import SPI
from machine import Pin
# Thermocouple Types
# One of these values is passed as tc_type to MAX31856() and is placed in
# the low nibble of Control Register 1 (via cr1 = (avgsel << 4) + tc_type)
# to select the chip's linearization curve.
MAX31856_B_TYPE = 0x0 # Read B Type Thermocouple
MAX31856_E_TYPE = 0x1 # Read E Type Thermocouple
MAX31856_J_TYPE = 0x2 # Read J Type Thermocouple
MAX31856_K_TYPE = 0x3 # Read K Type Thermocouple
MAX31856_N_TYPE = 0x4 # Read N Type Thermocouple
MAX31856_R_TYPE = 0x5 # Read R Type Thermocouple
MAX31856_S_TYPE = 0x6 # Read S Type Thermocouple
MAX31856_T_TYPE = 0x7 # Read T Type Thermocouple
class MAX31856(object):
"""Class to represent an Adafruit MAX31856 thermocouple temperature
measurement board.
"""
# Board Specific Constants
MAX31856_CONST_THERM_LSB = 2**-7
MAX31856_CONST_THERM_BITS = 19
MAX31856_CONST_CJ_LSB = 2**-6
MAX31856_CONST_CJ_BITS = 14
### Register constants, see data sheet Table 6 (in Rev. 0) for info.
# Read Addresses
MAX31856_REG_READ_CR0 = 0x00
MAX31856_REG_READ_CR1 = 0x01
MAX31856_REG_READ_MASK = 0x02
MAX31856_REG_READ_CJHF = 0x03
MAX31856_REG_READ_CJLF = 0x04
MAX31856_REG_READ_LTHFTH = 0x05
MAX31856_REG_READ_LTHFTL = 0x06
MAX31856_REG_READ_LTLFTH = 0x07
MAX31856_REG_READ_LTLFTL = 0x08
MAX31856_REG_READ_CJTO = 0x09
MAX31856_REG_READ_CJTH = 0x0A # Cold-Junction Temperature Register, MSB
MAX31856_REG_READ_CJTL = 0x0B # Cold-Junction Temperature Register, LSB
MAX31856_REG_READ_LTCBH = 0x0C # Linearized TC Temperature, Byte 2
MAX31856_REG_READ_LTCBM = 0x0D # Linearized TC Temperature, Byte 1
MAX31856_REG_READ_LTCBL = 0x0E # Linearized TC Temperature, Byte 0
MAX31856_REG_READ_FAULT = 0x0F # Fault status register
# Write Addresses
MAX31856_REG_WRITE_CR0 = 0x80
MAX31856_REG_WRITE_CR1 = 0x81
MAX31856_REG_WRITE_MASK = 0x82
MAX31856_REG_WRITE_CJHF = 0x83
MAX31856_REG_WRITE_CJLF = 0x84
MAX31856_REG_WRITE_LTHFTH = 0x85
MAX31856_REG_WRITE_LTHFTL = 0x86
MAX31856_REG_WRITE_LTLFTH = 0x87
MAX31856_REG_WRITE_LTLFTL = 0x88
MAX31856_REG_WRITE_CJTO = 0x89
MAX31856_REG_WRITE_CJTH = 0x8A # Cold-Junction Temperature Register, MSB
MAX31856_REG_WRITE_CJTL = 0x8B # Cold-Junction Temperature Register, LSB
# Pre-config Register Options
MAX31856_CR0_READ_ONE = 0x40 # One shot reading, delay approx. 200ms then read temp registers
MAX31856_CR0_READ_CONT = 0x80 # Continuous reading, delay approx. 100ms between readings
MAX31856_CR0_REJECT_50Hz = 0x01 # Noise rejection filter selection
def __init__(self, tc_type=MAX31856_K_TYPE, avgsel=0x0, cs_pin='P9'):
"""Initialize MAX31856 device with hardware SPI.
Args:
tc_type (1-byte Hex): Type of Thermocouple. Choose from class variables of the form
MAX31856.MAX31856_K_TYPE.
avgsel (1-byte Hex): Type of Averaging. Choose from values in CR0 table of datasheet.
Default is single sample.
cs_pin: chip select Pin. Default P9
"""
# initialize cs_pin in gpio mode and make it an CS output
self.CS = Pin(cs_pin, mode=Pin.OUT)
self.CS(True) # init chip select
self.spi = SPI(0, mode=SPI.MASTER, baudrate=500000, polarity=0, phase=1, firstbit=SPI.MSB)
# Initialize control register 1
self.tc_type = tc_type
self.avgsel = avgsel
self.cr1 = ((self.avgsel << 4) + self.tc_type)
# Setup for reading continuously with K-Type thermocouple und 50Hz noise rejection
self._write_register(self.MAX31856_REG_WRITE_CR0, self.MAX31856_CR0_READ_CONT+self.MAX31856_CR0_REJECT_50Hz)
self._write_register(self.MAX31856_REG_WRITE_CR1, self.cr1)
@staticmethod
def _cj_temp_from_bytes(msb, lsb):
# Takes in the msb and lsb from a Cold Junction (CJ) temperature reading and
# converts it into a decimal value.
# msb (hex): Most significant byte of CJ temperature
# lsb (hex): Least significant byte of a CJ temperature
# (((msb w/o +/-) shifted by number of 1 byte above lsb)
# + val_low_byte)
# >> shifted back by # of dead bits
temp_bytes = (((msb & 0x7F) << 8) + lsb) >> 2
if msb & 0x80:
# Negative Value. Scale back by number of bits
temp_bytes -= 2**(MAX31856.MAX31856_CONST_CJ_BITS -1)
# temp_bytes*value of lsb
temp_c = temp_bytes*MAX31856.MAX31856_CONST_CJ_LSB
return temp_c
@staticmethod
def _thermocouple_temp_from_bytes(byte0, byte1, byte2):
# Converts the thermocouple byte values to a decimal value.
# byte2 (hex): Most significant byte of thermocouple temperature
# byte1 (hex): Middle byte of thermocouple temperature
# byte0 (hex): Least significant byte of a thermocouple temperature
# temp_c (float): Temperature in degrees celsius
#
# (((val_high_byte w/o +/-) shifted by 2 bytes above LSB)
# + (val_mid_byte shifted by number 1 byte above LSB)
# + val_low_byte )
# >> back shift by number of dead bits
temp_bytes = (((byte2 & 0x7F) << 16) + (byte1 << 8) + byte0)
temp_bytes = temp_bytes >> 5
if byte2 & 0x80:
temp_bytes -= 2**(MAX31856.MAX31856_CONST_THERM_BITS -1)
# temp_bytes*value of LSB
temp_c = temp_bytes*MAX31856.MAX31856_CONST_THERM_LSB
return temp_c
def read_internal_temp_c(self):
# Return internal temperature value in degrees celsius.
# Read as a multibyte transfer to ensure both bytes are from the
# same temperature update.
self.CS(False)
self.spi.write(bytes([self.MAX31856_REG_READ_CJTH])) # first read address
val_high_byte = self.spi.read(1)[0]
val_low_byte = self.spi.read(1)[0]
self.CS(True)
temp_c = MAX31856._cj_temp_from_bytes(val_high_byte, val_low_byte)
return temp_c
def read_temp_c(self):
# Return the thermocouple temperature value in degrees celsius.
# Read as a multibyte transfer to ensure all three bytes are from the
# same temperature update.
self.CS(False)
self.spi.write(bytes([self.MAX31856_REG_READ_LTCBH])) # first read address
val_high_byte = self.spi.read(1)[0]
val_mid_byte = self.spi.read(1)[0]
val_low_byte = self.spi.read(1)[0]
fault = self.spi.read(1)[0]
self.CS(True)
# check fault byte
if ((fault & 0x80) != 0):
raise MAX31856Error("Cold Junction Out-of-Range")
if ((fault & 0x40) != 0):
raise MAX31856Error("Thermocouple Out-of-Range")
if ((fault & 0x20) != 0):
raise MAX31856Error("Cold-Junction High Fault")
if ((fault & 0x10) != 0):
raise MAX31856Error("Cold-Junction Low Fault")
if ((fault & 0x08) != 0):
raise MAX31856Error("Thermocouple Temperature High Fault")
if ((fault & 0x04) != 0):
raise MAX31856Error("Thermocouple Temperature Low Fault")
if ((fault & 0x02) != 0):
raise MAX31856Error("Overvoltage or Undervoltage Input Fault")
if ((fault & 0x01) != 0):
raise MAX31856Error("Thermocouple Open-Circuit Fault")
temp_c = MAX31856._thermocouple_temp_from_bytes(val_low_byte, val_mid_byte, val_high_byte)
return temp_c
def read_fault_register(self):
# Return bytes containing fault codes and hardware problems.
reg = self._read_register(self.MAX31856_REG_READ_FAULT)
return reg
def _read_register(self, address):
# Reads a register at address from the MAX31856
# Args: address (8-bit Hex): Address for read register.
self.CS(False)
self.spi.write(bytes([address]))
value=self.spi.read(1)[0]
self.CS(True)
return value
def _write_register(self, address, write_value):
# Writes to a register at address from the MAX31856
# address (8-bit Hex): Address for read register.
# write_value (8-bit Hex): Value to write to the register
self.CS(False)
self.spi.write(bytes([address, write_value]))
self.CS(True)
# print('Wrote Register: 0x{0:02X}, Value 0x{1:02X}'.format((address & 0xFF), (write_value & 0xFF)))
return True
class MAX31856Error(Exception):
    """Raised when the MAX31856 reports a fault condition."""

    def __init__(self, msg):
        super().__init__(msg)
| [
"machine.Pin",
"machine.SPI"
] | [((4330, 4355), 'machine.Pin', 'Pin', (['cs_pin'], {'mode': 'Pin.OUT'}), '(cs_pin, mode=Pin.OUT)\n', (4333, 4355), False, 'from machine import Pin\n'), ((4417, 4496), 'machine.SPI', 'SPI', (['(0)'], {'mode': 'SPI.MASTER', 'baudrate': '(500000)', 'polarity': '(0)', 'phase': '(1)', 'firstbit': 'SPI.MSB'}), '(0, mode=SPI.MASTER, baudrate=500000, polarity=0, phase=1, firstbit=SPI.MSB)\n', (4420, 4496), False, 'from machine import SPI\n')] |
import torch
import re
import copy
import numpy
from torch.utils.data.dataloader import default_collate
from netdissect import nethook, imgviz, tally, unravelconv, upsample
def acts_image(model, dataset,
        layer=None, unit=None,
        thumbsize=None,
        cachedir=None,
        return_as='strip', # or individual, or tensor
        k=100, r=4096, q=0.01,
        batch_size=10,
        sample_size=None,
        num_workers=30):
    """Render the top-activating image patches for units of `layer`.

    Tallies activation statistics over `dataset` via acts_stats, then
    windows out the top-k patches per unit via window_images.  Returns a
    strip/list/tensor of visualizations per unit; if `unit` is a scalar,
    returns just that unit's visualization.
    """
    assert return_as in ['strip', 'individual', 'tensor']
    # Tally with at least 200 exemplars so windowing has enough to choose from.
    topk, rq, run = acts_stats(
        model, dataset, layer=layer, unit=unit,
        k=max(200, k), r=r, batch_size=batch_size, num_workers=num_workers,
        sample_size=sample_size, cachedir=cachedir)
    images = window_images(
        dataset, topk, rq, run,
        thumbsize=thumbsize, return_as=return_as, k=k, q=q,
        cachedir=cachedir)
    if unit is not None and not hasattr(unit, '__len__'):
        # Scalar unit: unwrap the single-entry result.
        images = images[0]
    return images
def grad_image(model, dataset,
        layer=None, unit=None,
        thumbsize=None,
        cachedir=None,
        return_as='strip', # or individual, or tensor
        k=100, r=4096, q=0.01,
        batch_size=10,
        sample_size=None,
        num_workers=30):
    """Render the image patches where the loss gradient at `layer` is largest.

    Tallies gradient statistics over `dataset` via grad_stats, then windows
    out the top-k patches per unit via window_images.  Returns a
    strip/list/tensor of visualizations per unit; if `unit` is a scalar,
    returns just that unit's visualization.
    """
    assert return_as in ['strip', 'individual', 'tensor']
    # Tally with at least 200 exemplars; the bottom-k tally is unused here.
    topk, _botk, rq, run = grad_stats(
        model, dataset, layer=layer, unit=unit,
        k=max(200, k), r=r,
        batch_size=batch_size, num_workers=num_workers,
        sample_size=sample_size, cachedir=cachedir)
    images = window_images(
        dataset, topk, rq, run,
        thumbsize=thumbsize, return_as=return_as, k=k, q=q,
        cachedir=cachedir)
    if unit is not None and not hasattr(unit, '__len__'):
        # Scalar unit: unwrap the single-entry result.
        images = images[0]
    return images
def update_image(model, dataset,
        layer=None, unit=None,
        thumbsize=None,
        cachedir=None,
        return_as='strip', # or individual, or tensor
        k=100, r=4096, q=0.01,
        cinv=None,
        batch_size=10,
        sample_size=None,
        num_workers=30):
    """Render the patches most responsive to the accumulated weight update.

    Tallies statistics of the layer after its weight is replaced by the
    (optionally cinv-projected) negated accumulated weight gradient (see
    update_stats), then windows out the top-k patches per unit.  If `unit`
    is a scalar, returns just that unit's visualization.
    """
    assert return_as in ['strip', 'individual', 'tensor']
    # Tally with at least 200 exemplars; the bottom-k tally is unused here.
    topk, _botk, rq, run = update_stats(
        model, dataset, layer=layer, unit=unit,
        k=max(200, k), r=r, cinv=cinv,
        batch_size=batch_size, num_workers=num_workers,
        sample_size=sample_size, cachedir=cachedir)
    images = window_images(
        dataset, topk, rq, run,
        thumbsize=thumbsize, return_as=return_as, k=k, q=q,
        cachedir=cachedir)
    if unit is not None and not hasattr(unit, '__len__'):
        # Scalar unit: unwrap the single-entry result.
        images = images[0]
    return images
def proj_image(model, dataset,
        layer=None, unit=None,
        thumbsize=None,
        cachedir=None,
        return_as='strip', # or individual, or tensor
        k=100, r=4096, q=0.01,
        batch_size=10,
        sample_size=None,
        num_workers=30):
    """Render patches for the projected-update statistics of `layer`.

    Like update_image, but the weight gradient is first projected through
    the pseudo-inverse of the layer-input second moment (see proj_stats).
    If `unit` is a scalar, returns just that unit's visualization.
    """
    assert return_as in ['strip', 'individual', 'tensor']
    # Tally with at least 200 exemplars; the bottom-k tally is unused here.
    topk, _botk, rq, run = proj_stats(
        model, dataset, layer=layer, unit=unit,
        k=max(200, k), r=r, batch_size=batch_size, num_workers=num_workers,
        sample_size=sample_size, cachedir=cachedir)
    images = window_images(
        dataset, topk, rq, run,
        thumbsize=thumbsize, return_as=return_as, k=k, q=q,
        cachedir=cachedir)
    if unit is not None and not hasattr(unit, '__len__'):
        # Scalar unit: unwrap the single-entry result.
        images = images[0]
    return images
def acts_stats(model, dataset,
        layer=None, unit=None,
        cachedir=None,
        k=100, r=4096,
        batch_size=10,
        sample_size=None,
        num_workers=30):
    """Tally per-unit top-k activations and a quantile sketch for `layer`.

    Returns (topk, rq, run), where `run` maps an input batch to the
    (possibly unit-sliced) output of the traced layer.
    """
    assert not model.training
    # Normalize `unit` to a non-empty list (or None for all units).
    if unit is not None and not hasattr(unit, '__len__'):
        unit = [unit]
    assert unit is None or len(unit) > 0
    module = model if layer is None else nethook.get_module(model, layer)
    device = next(model.parameters()).device
    pin_memory = device.type != 'cpu'
    def run(x, *args):
        # Trace with stop=True aborts the forward pass once the layer
        # output has been captured.
        with nethook.Trace(module, stop=True) as ret, torch.no_grad():
            model(x.to(device))
        out = ret.output
        return out if unit is None else out[:, unit]
    run.name = 'acts'
    def compute_samples(batch, *args):
        # Per-image spatial maxima feed the top-k tally; every spatial
        # sample feeds the quantile sketch.
        acts = run(batch)
        per_image = acts.view(acts.shape[0], acts.shape[1], -1)
        top = per_image.max(2)[0]
        flat = acts.permute(0, 2, 3, 1).reshape(-1, acts.shape[1])
        return top, flat
    topk, rq = tally.tally_topk_and_quantile(
        compute_samples, dataset, k=k, r=r,
        batch_size=batch_size,
        num_workers=num_workers, pin_memory=pin_memory,
        sample_size=sample_size,
        cachefile=f'{cachedir}/acts_topk_rq.npz' if cachedir else None)
    return topk, rq, run
def grad_stats(model, dataset, layer,
        unit=None,
        cachedir=None,
        k=100, r=4096,
        batch_size=10,
        sample_size=None,
        num_workers=30,
        ):
    """Tally extreme values and quantiles of the loss gradient at `layer`.

    For each unit of `layer`, records the per-image top-k and bottom-k
    values of the negated cross-entropy-loss gradient with respect to the
    layer's output, plus a quantile sketch over all spatial samples.

    Returns:
        (topk, botk, rq, run) where `run` maps an (x, y) batch to the
        negated, optionally unit-sliced, layer-output gradient.
    """
    assert not model.training
    # Normalize `unit` to a non-empty list (or None for all units).
    if unit is not None:
        if not hasattr(unit, '__len__'):
            unit = [unit]
    assert unit is None or len(unit) > 0
    # Make a copy so we can disable grad on parameters
    # (we only want gradients w.r.t. activations, not weights).
    cloned_model = copy.deepcopy(model)
    nethook.set_requires_grad(False, cloned_model)
    if layer is not None:
        module = nethook.get_module(cloned_model, layer)
    else:
        module = cloned_model
    device = next(cloned_model.parameters()).device
    pin_memory = (device.type != 'cpu')
    def run(x, y, *args):
        # Forward + backward one batch; retain_grad=True makes the traced
        # layer output keep its gradient so it can be read after backward().
        with nethook.Trace(module, retain_grad=True) as ret, (
                torch.enable_grad()):
            out = cloned_model(x.to(device))
            r = ret.output
            loss = torch.nn.functional.cross_entropy(out, y.to(device))
            loss.backward()
            # Negate: large values mark activations whose increase would
            # decrease the loss.
            r = -r.grad
            if unit is not None:
                r = r[:, unit]
            return r
    run.name = 'grad'
    def compute_samples(x, y, *args):
        # Per-image spatial extremes feed the extreme-k tallies; every
        # spatial sample feeds the quantile sketch.
        r = run(x, y)
        flat_r = r.view(r.shape[0], r.shape[1], -1)
        top_r = flat_r.max(2)[0]
        bot_r = flat_r.min(2)[0]
        all_r = r.permute(0, 2, 3, 1).reshape(-1, r.shape[1])
        return top_r, bot_r, all_r
    topk, botk, rq = tally.tally_extremek_and_quantile(
        compute_samples, dataset, k=k, r=r,
        batch_size=batch_size,
        num_workers=num_workers, pin_memory=pin_memory,
        sample_size=sample_size,
        cachefile=f'{cachedir}/grad_exk_rq.npz' if cachedir else None)
    return topk, botk, rq, run
def weight_grad(model, dataset, layer,
        unit=None,
        cachedir=None,
        batch_size=10,
        sample_size=None,
        num_workers=30):
    """Accumulate the cross-entropy weight gradient of `layer` over `dataset`.

    Runs forward+backward on every batch without clearing gradients, so
    `module.weight.grad` sums the gradient over the whole (sampled) dataset.
    Returns that accumulated gradient tensor.

    NOTE(review): the `unit` parameter is accepted but never used here --
    confirm whether per-unit slicing was intended.
    """
    # Make a copy so we can disable grad on parameters
    # (only the target layer's weight keeps requires_grad=True).
    cloned_model = copy.deepcopy(model)
    nethook.set_requires_grad(False, cloned_model)
    module = nethook.get_module(cloned_model, layer)
    nethook.set_requires_grad(True, module)
    device = next(cloned_model.parameters()).device
    pin_memory = (device.type != 'cpu')
    def accumulate_grad(x, y, *args):
        # backward() without zero_grad accumulates into module.weight.grad.
        with torch.enable_grad():
            out = cloned_model(x.to(device))
            loss = torch.nn.functional.cross_entropy(out, y.to(device))
            loss.backward()
    def weight_grad():
        # Summarizer passed to tally_each; note it shadows the enclosing
        # function's name, which is harmless here.
        return dict(wgrad=module.weight.grad)
    module.weight.grad = None
    wg = tally.tally_each(accumulate_grad, dataset, summarize=weight_grad,
            batch_size=batch_size,
            num_workers=num_workers, pin_memory=pin_memory,
            sample_size=sample_size,
            cachefile=f'{cachedir}/weight_grad.npz' if cachedir else None)['wgrad']
    return wg
def update_stats(model, dataset, layer,
        unit=None,
        cachedir=None,
        k=100, r=4096,
        batch_size=10,
        cinv=None,
        sample_size=None,
        num_workers=30,
        ):
    """Tally the layer response after replacing its weight with -weight_grad.

    Accumulates the dataset-wide weight gradient of `layer`, optionally
    projects it through `cinv`, writes its negation into a cloned model's
    layer weight (bias zeroed), and tallies the resulting activations.

    Returns:
        (topk, botk, rq, run) where `run` maps an input batch to the
        modified layer's (optionally unit-sliced) output.
    """
    assert not model.training
    # Normalize `unit` to a non-empty list (or None for all units).
    if unit is not None:
        if not hasattr(unit, '__len__'):
            unit = [unit]
    assert unit is None or len(unit) > 0
    # get weight grad (assumes layer has a weight param)
    wg = weight_grad(model, dataset, layer,
            cachedir=cachedir,
            batch_size=batch_size,
            sample_size=sample_size,
            num_workers=num_workers)
    if cinv is not None:
        # Project the gradient through cinv (done on CPU) before use.
        wg = torch.mm(wg.view(-1,
            cinv.shape[0]).cpu(),
            cinv.cpu()).view(wg.shape)
    # copy the model so we can change its weights.
    cloned_model = copy.deepcopy(model)
    nethook.set_requires_grad(False, cloned_model)
    module = nethook.get_module(cloned_model, layer)
    device = next(cloned_model.parameters()).device
    pin_memory = (device.type != 'cpu')
    with torch.no_grad():
        # Replace the layer's weight with the negated accumulated gradient
        # and zero the bias, so the layer now scores "update direction".
        module.weight[...] = -wg.to(device)
        if hasattr(module, 'bias') and module.bias is not None:
            module.bias[...] = 0
    def run(x, *args):
        # stop=True aborts the forward pass after the layer is captured.
        with nethook.Trace(module, stop=True) as ret, torch.no_grad():
            cloned_model(x.to(device))
        r = ret.output
        if unit is not None:
            r = r[:, unit]
        return r
    run.name = 'update' if cinv is None else 'proj'
    def compute_samples(batch, *args):
        # Per-image spatial extremes feed the extreme-k tallies; every
        # spatial sample feeds the quantile sketch.
        r = run(batch)
        flat_r = r.view(r.shape[0], r.shape[1], -1)
        top_r = flat_r.max(2)[0]
        bot_r = flat_r.min(2)[0]
        all_r = r.permute(0, 2, 3, 1).reshape(-1, r.shape[1])
        return top_r, bot_r, all_r
    topk, botk, rq = tally.tally_extremek_and_quantile(
        compute_samples, dataset, k=k, r=r,
        batch_size=batch_size,
        num_workers=num_workers, pin_memory=pin_memory,
        sample_size=sample_size,
        cachefile=f'{cachedir}/{run.name}_exk_rq.npz' if cachedir else None)
    return topk, botk, rq, run
def proj_c2m(model, dataset, layer,
        cachedir=None,
        batch_size=10,
        sample_size=None,
        num_workers=30,
        ):
    """Tally the second moment of `layer`'s unrolled convolution input.

    Splits the Conv2d at `layer` into an unrolling step (tconv) followed
    by a 1x1 weight step (wconv) via unravelconv, then tallies the second
    moment of the unrolled input patches seen by the weight step.

    Returns:
        (c2m, ex_run) where c2m is the second-moment tally and ex_run maps
        an input batch to the unrolled patch tensor.
    """
    assert not model.training
    device = next(model.parameters()).device
    pin_memory = (device.type != 'cpu')
    # Clone so the layer can be swapped out without touching the original.
    cloned_model = copy.deepcopy(model)
    module = nethook.get_module(cloned_model, layer)
    assert isinstance(module, torch.nn.Conv2d)
    nethook.set_requires_grad(False, cloned_model)
    unraveled = unravelconv.unravel_left_conv2d(module)
    unraveled.wconv.weight.requires_grad = True
    unraveled.wconv.weight.grad = None
    nethook.replace_module(cloned_model, layer, unraveled)
    tconv = unraveled.tconv
    def ex_run(x, *args):
        # stop=True aborts the forward pass after tconv's output (the
        # unrolled input patches) has been captured.
        with nethook.Trace(tconv, stop=True) as unrav:
            cloned_model(x.to(device))
        return unrav.output
    def ex_sample(x, *args):
        # Flatten spatial positions so each patch is one sample row.
        r = ex_run(x, *args)
        return r.permute(0, 2, 3, 1).reshape(-1, r.shape[1])
    c2m = tally.tally_second_moment(ex_sample,
            dataset,
            batch_size=batch_size,
            num_workers=num_workers, pin_memory=pin_memory,
            sample_size=sample_size,
            cachefile=f'{cachedir}/input_cov_moment.npz' if cachedir else None)
    return c2m, ex_run
def proj_stats(model, dataset, layer,
        unit=None,
        cachedir=None,
        k=100, r=4096,
        batch_size=10,
        sample_size=None,
        num_workers=30,
        ):
    """Like update_stats, with the weight gradient projected first.

    Computes the second moment of the layer's unrolled input patches, and
    feeds its pseudo-inverse as `cinv` to update_stats so the accumulated
    weight gradient is projected before being applied.
    """
    c2m, _ex_run = proj_c2m(model, dataset, layer,
            batch_size=batch_size, sample_size=sample_size,
            cachedir=cachedir)
    # NOTE(review): a better-conditioned inverse might come from
    # normalizing the moment to a correlation matrix, or from diagonal
    # regularization, before inverting -- candidates to try.
    cinv = c2m.moment().pinverse()
    return update_stats(model, dataset, layer, unit=unit,
            cinv=cinv,
            k=k, r=r, batch_size=batch_size, sample_size=sample_size,
            cachedir=cachedir)
def window_images(dataset, topk, rq, run,
        thumbsize=None,
        return_as='strip', # or individual, or tensor
        k=None, q=0.01,
        border_color=None,
        vizname=None,
        cachedir=None):
    """Render thresholded image windows for the top-k exemplars per unit.

    Uses `rq` to pick a per-unit activation threshold at quantile q (from
    the top or the bottom, depending on topk.largest), then renders the
    masked exemplar patches chosen by `topk`.
    """
    assert return_as in ['strip', 'individual', 'tensor']
    sample = default_collate([dataset[0]])
    sample_output = run(*sample)
    image_size = tuple(sample[0].shape[2:])
    if thumbsize is None:
        thumbsize = image_size
    if not isinstance(thumbsize, (list, tuple)):
        thumbsize = (thumbsize, thumbsize)
    if topk is None:
        # No ranking given: visualize the first (k or 1) units in order.
        topk = tally.range_topk(sample_output.size(1), size=(k or 1))
    vizname_default = 'top' if topk.largest else 'bot'
    if border_color in ('red', 'green', 'yellow'):
        # Named color: record it in the cache name and convert to RGB.
        vizname_default += border_color
        border_color = {'red': [255.0, 0.0, 0.0],
                        'green': [0.0, 255.0, 0.0],
                        'yellow': [255.0, 255.0, 0.0]}[border_color]
    if vizname is None:
        vizname = vizname_default
    level = rq.quantiles((1.0 - q) if topk.largest else q)
    iv = imgviz.ImageVisualizer(
        thumbsize, image_size=image_size, source=dataset, level=level)
    renderers = {
        'strip': iv.masked_images_for_topk,
        'individual': iv.individual_masked_images_for_topk,
        'tensor': iv.masked_image_grid_for_topk,
    }
    cachefile = (f'{cachedir}/{vizname}{k or ""}images.npz'
                 if cachedir else None)
    return renderers[return_as](
        run, dataset, topk, k=k, largest=topk.largest,
        border_color=border_color, cachefile=cachefile)
def label_stats(dataset_with_seg, num_seglabels,
        run, level, upfn=None,
        negate=False,
        cachedir=None,
        batch_size=10,
        sample_size=None,
        num_workers=30):
    """Tally intersection/union between thresholded units and segmentations.

    For each unit, compares the indicator "activation above `level`" (or
    below, when `negate`) against each of `num_seglabels` segmentation
    classes, upsampling activations to segmentation resolution with `upfn`.

    NOTE(review): `batch_size` is accepted but not forwarded to the tally
    call below -- confirm whether that was intended.
    """
    # Create upfn
    # Probe one sample to learn the activation and segmentation sizes;
    # the dataset is assumed to yield (inputs..., seg, label) tuples.
    data_sample = default_collate([dataset_with_seg[0]])
    input_sample = data_sample[:-2] + data_sample[-1:]
    seg_sample = data_sample[-2]
    r_sample = run(*input_sample)
    r_size = tuple(r_sample.shape[2:])
    seg_size = tuple(seg_sample.shape[2:])
    device = r_sample.device
    pin_memory = (device.type != 'cpu')
    if upfn is None:
        upfn = upsample.upsampler(seg_size, r_size)
    def compute_concept_pair(batch, seg, *args):
        seg = seg.to(device)
        acts = run(batch, *args)
        hacts = upfn(acts)
        iacts = (hacts < level if negate else hacts > level) # indicator
        # One-hot segmentation indicator per class.
        iseg = torch.zeros(seg.shape[0], num_seglabels,
                seg.shape[2], seg.shape[3],
                dtype=torch.bool, device=seg.device)
        iseg.scatter_(dim=1, index=seg, value=1)
        # Flatten spatial positions so each pixel is one sample row.
        flat_segs = iseg.permute(0, 2, 3, 1).reshape(-1, iseg.shape[1])
        flat_acts = iacts.permute(0, 2, 3, 1).reshape(-1, iacts.shape[1])
        return flat_segs, flat_acts
    neg = 'neg' if negate else ''
    iu99 = tally.tally_all_intersection_and_union(
        compute_concept_pair,
        dataset_with_seg,
        sample_size=sample_size,
        num_workers=num_workers, pin_memory=pin_memory,
        cachefile=f'{cachedir}/{neg}{run.name}_iu.npz' if cachedir else None)
    return iu99
def topk_label_stats(dataset_with_seg, num_seglabels,
        run, level, topk, k=None,
        upfn=None,
        negate=False,
        cachedir=None,
        batch_size=10,
        sample_size=None,
        num_workers=30):
    """Compute per-unit IoU against segmentation classes over top-k images.

    For each unit, gathers that unit's top-k exemplar images (per `topk`)
    and accumulates intersection and union between the thresholded unit
    activation (activation > `level`) and each of `num_seglabels`
    segmentation classes, returning a (num_units, num_seglabels) IoU tensor.

    BUG FIX: `k` was previously ignored -- tally.gather_topk was always
    called with the hard-coded k=100.  Now `k` is honored, with 100 kept
    as the default when k is None, preserving prior behavior.

    NOTE(review): `negate`, `batch_size`, and `cachedir` are accepted but
    unused here (the indicator is always `hacts > level`) -- confirm intent.
    """
    # Create upfn
    # Probe one sample to learn the activation and segmentation sizes;
    # the dataset is assumed to yield (inputs..., seg, label) tuples.
    data_sample = default_collate([dataset_with_seg[0]])
    input_sample = data_sample[:-2] + data_sample[-1:]
    seg_sample = data_sample[-2]
    r_sample = run(*input_sample)
    r_size = tuple(r_sample.shape[2:])
    seg_size = tuple(seg_sample.shape[2:])
    device = r_sample.device
    num_units = r_sample.shape[1]
    pin_memory = (device.type != 'cpu')
    if upfn is None:
        upfn = upsample.upsampler(seg_size, r_size)
    intersections = torch.zeros(num_units, num_seglabels).to(device)
    unions = torch.zeros(num_units, num_seglabels).to(device)
    def collate_unit_iou(units, imgs, seg, labels):
        seg = seg.to(device)
        acts = run(imgs, labels)
        hacts = upfn(acts)
        iacts = (hacts > level) # indicator
        # One-hot segmentation indicator per class.
        iseg = torch.zeros(seg.shape[0], num_seglabels,
                seg.shape[2], seg.shape[3],
                dtype=torch.bool, device=seg.device)
        iseg.scatter_(dim=1, index=seg, value=1)
        # Accumulate I and U only for the units whose top-k list includes
        # this image.
        for i in range(len(imgs)):
            ulist = units[i]
            for unit, _ in ulist:
                im_i = (iacts[i, unit][None] & iseg[i]).view(
                        num_seglabels, -1).float().sum(1)
                im_u = (iacts[i, unit][None] | iseg[i]).view(
                        num_seglabels, -1).float().sum(1)
                intersections[unit] += im_i
                unions[unit] += im_u
        return []
    tally.gather_topk(collate_unit_iou, dataset_with_seg, topk,
            k=100 if k is None else k)
    # Small epsilon avoids division by zero for units/classes never seen.
    return intersections / (unions + 1e-20)
### Experiment below - find the best representative with gradient in the consensus directioin.
# 1. Tally weight grad over the dataset.
# 2. For each unit, find the topk images with gradients in the same direction as this
# consensus weight grad.
def wgrad_stats(model, dataset, layer, cachedir=None,
        k=100, r=4096,
        batch_size=10,
        sample_size=None,
        num_workers=30,
        ):
    """Score patches by alignment with the consensus weight gradient.

    Accumulates the dataset-wide weight gradient of `layer`, then scores
    every spatial location of every image by how strongly its contribution
    pushes in that consensus direction, tallying extremes and quantiles.

    Returns:
        (topk, botk, rq, run) where `run` maps an (x, y) batch to the
        per-unit signed patch scores.
    """
    assert not model.training
    # NOTE(review): this module/device lookup on the original model is dead
    # code -- `module` is reassigned from the clone below before any use.
    if layer is not None:
        module = nethook.get_module(model, layer)
    else:
        module = model
    device = next(model.parameters()).device
    pin_memory = (device.type != 'cpu')
    # Clone so parameter grads can be configured without touching `model`.
    cloned_model = copy.deepcopy(model)
    nethook.set_requires_grad(False, cloned_model)
    module = nethook.get_module(cloned_model, layer)
    module.weight.requires_grad = True
    module.weight.grad = None
    wg = weight_grad(model, dataset, layer,
            cachedir=cachedir,
            batch_size=batch_size,
            sample_size=sample_size,
            num_workers=num_workers)
    wg = wg.to(device)
    module.weight.requires_grad = False
    ks = module.kernel_size
    # A conv with the consensus weight grad as its kernel: its output at a
    # location is the dot product of the input patch with E[dL/dw].
    unfolder = torch.nn.Conv2d(
        in_channels=module.in_channels, out_channels=module.out_channels,
        kernel_size=ks, padding=module.padding,
        dilation=module.dilation, stride=module.stride,
        bias=False)
    nethook.set_requires_grad(False, unfolder)
    unfolder.to(device)
    unfolder.weight[...] = wg
    def run(x, y, *args, return_details=False):
        with nethook.Trace(module, retain_grad=True, retain_input=True) as ret, (
                torch.enable_grad()):
            out = cloned_model(x.to(device))
            r = ret.output
            inp = ret.input
            loss = torch.nn.functional.cross_entropy(out, y.to(device))
            loss.backward()
            # The contribution to the weight gradient from every patch.
            # If we were to sum unfgrad.sum(dim=(0,5,6)) it would equal module.weight.grad
            # Now to reduce things, we need to score it per-patch somehow. We will dot-product
            # the average grad per-unit to see which patches push most in the consensus direction.
            # This gives a per-unit score at every patch.
            score = unfolder(inp) * r.grad
            # Hack: it is interesting to separate the cases where rgrad is positive
            # (the patch should look more like this to decrease the loss) from cases
            # where it is negative (where the patch should look less like this. So
            # we will drop cases here the score is negative, and then negate the
            # score when ograd is negative.
            signed_score = score.clamp(0) * (r.grad.sign())
            if return_details:
                # NOTE: the comprehension variable k shadows the parameter
                # k inside this dict comprehension only.
                return {k: v.detach().cpu() for k, v in dict(
                    model_output=out,
                    loss=loss,
                    layer_output=r,
                    layer_output_grad=r.grad,
                    layer_input=inp,
                    layer_input_by_Edw=unfolder(inp),
                    weight_grad=wg,
                    score=score,
                    signed_score=signed_score).items()}
            return signed_score
        # Equivalent unrolled code below.
        # scores = []
        # for i in range(0, len(unf), 2):
        #     ug = unf[i:i+2,None,:,:,:,:,:] * r.grad[i:i+2,:,None,None,None,:,:]
        #     # Now to reduce things, we need to score it per-patch somehow. We will dot-product
        #     # the average grad per-unit to see which patches push most in the consensus direction.
        #     # This gives a per-unit score at every patch.
        #     score = (ug * wg[None,:,:,:,:,None,None]
        #     ).view(ug.shape[0], ug.shape[1], -1, ug.shape[5], ug.shape[6]).sum(2)
        #     scores.append(score)
        #     return torch.cat(scores)
    run.name = 'wgrad'
    def compute_samples(batch, labels, *args):
        # Per-image spatial extremes feed the extreme-k tallies; every
        # spatial sample feeds the quantile sketch.
        score = run(batch, labels)
        flat_score = score.view(score.shape[0], score.shape[1], -1)
        top_score = flat_score.max(2)[0]
        bot_score = flat_score.min(2)[0]
        all_score = score.permute(0, 2, 3, 1).reshape(-1, score.shape[1])
        return top_score, bot_score, all_score
    topk, botk, rq = tally.tally_extremek_and_quantile(
        compute_samples, dataset, k=k, r=r,
        batch_size=batch_size,
        num_workers=num_workers, pin_memory=pin_memory,
        sample_size=sample_size,
        cachefile=f'{cachedir}/swgrad_exk_rq.npz' if cachedir else None)
    return topk, botk, rq, run
### Experiment below:
# tally p-v times every post-relu activation in a layer
# and also sum up every activation
# This is intended to measure how well a (simple linear) model
# of the given feature can help solve the error p-v.
def sep_stats(model, dataset, layer=None, cachedir=None,
        batch_size=10, sample_size=None, num_workers=30):
    """Tally the mean of (unit activation) x (class error, label, 1) products.

    For each image, multiplies the spatially-averaged activation of every
    unit of `layer` by the concatenation of the softmax error (p - y), the
    one-hot label y, and a constant 1, and tallies the running mean.  This
    measures how a simple linear model of each feature could help reduce
    the error p - y.
    """
    assert not model.training
    if layer is not None:
        module = nethook.get_module(model, layer)
    else:
        module = model
    device = next(model.parameters()).device
    pin_memory = (device.type != 'cpu')
    def run(x, labels, *args):
        # Capture the layer output during a no-grad forward pass.
        with nethook.Trace(module) as ret, torch.no_grad():
            logits = model(x.to(device))
        labels = labels.to(device)
        r = ret.output
        p = torch.nn.functional.softmax(logits, dim=1)
        # One-hot encoding of the ground-truth labels.
        y = torch.zeros_like(p)
        y.scatter_(1, labels[:,None], 1)
        return r, p, y
    def compute_samples(batch, labels, *args):
        r, p, y = run(batch, labels)
        err = p-y
        # Per-image statistics vector: (error, one-hot label, bias term).
        sep_t = torch.cat((err, y, torch.ones(err.shape[0], 1, device=device)), dim=1)
        # Spatial mean of each unit's activation.
        flat_r = r.view(r.shape[0], r.shape[1], -1).mean(2)[:,:,None]
        r_times_sep_t = flat_r * sep_t[:,None,:]
        # Number of stats to track is units * (classes + 1)
        sep_data = r_times_sep_t.view(len(batch), -1)
        return sep_data
    sepmv = tally.tally_mean(
        compute_samples, dataset,
        batch_size=batch_size,
        num_workers=num_workers, pin_memory=pin_memory,
        sample_size=sample_size,
        cachefile=f'{cachedir}/sep_stats.npz' if cachedir else None)
    return sepmv
| [
"netdissect.tally.gather_topk",
"netdissect.nethook.get_module",
"netdissect.tally.tally_second_moment",
"copy.deepcopy",
"netdissect.tally.tally_topk_and_quantile",
"netdissect.nethook.replace_module",
"torch.nn.functional.softmax",
"netdissect.upsample.upsampler",
"netdissect.tally.tally_each",
... | [((5083, 5320), 'netdissect.tally.tally_topk_and_quantile', 'tally.tally_topk_and_quantile', (['compute_samples', 'dataset'], {'k': 'k', 'r': 'r', 'batch_size': 'batch_size', 'num_workers': 'num_workers', 'pin_memory': 'pin_memory', 'sample_size': 'sample_size', 'cachefile': "(f'{cachedir}/acts_topk_rq.npz' if cachedir else None)"}), "(compute_samples, dataset, k=k, r=r,\n batch_size=batch_size, num_workers=num_workers, pin_memory=pin_memory,\n sample_size=sample_size, cachefile=f'{cachedir}/acts_topk_rq.npz' if\n cachedir else None)\n", (5112, 5320), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((5850, 5870), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (5863, 5870), False, 'import copy\n'), ((5875, 5921), 'netdissect.nethook.set_requires_grad', 'nethook.set_requires_grad', (['(False)', 'cloned_model'], {}), '(False, cloned_model)\n', (5900, 5921), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((6865, 7105), 'netdissect.tally.tally_extremek_and_quantile', 'tally.tally_extremek_and_quantile', (['compute_samples', 'dataset'], {'k': 'k', 'r': 'r', 'batch_size': 'batch_size', 'num_workers': 'num_workers', 'pin_memory': 'pin_memory', 'sample_size': 'sample_size', 'cachefile': "(f'{cachedir}/grad_exk_rq.npz' if cachedir else None)"}), "(compute_samples, dataset, k=k, r=r,\n batch_size=batch_size, num_workers=num_workers, pin_memory=pin_memory,\n sample_size=sample_size, cachefile=f'{cachedir}/grad_exk_rq.npz' if\n cachedir else None)\n", (6898, 7105), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((7437, 7457), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (7450, 7457), False, 'import copy\n'), ((7462, 7508), 'netdissect.nethook.set_requires_grad', 'nethook.set_requires_grad', (['(False)', 'cloned_model'], {}), '(False, cloned_model)\n', (7487, 7508), False, 'from netdissect import nethook, imgviz, tally, unravelconv, 
upsample\n'), ((7522, 7561), 'netdissect.nethook.get_module', 'nethook.get_module', (['cloned_model', 'layer'], {}), '(cloned_model, layer)\n', (7540, 7561), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((7566, 7605), 'netdissect.nethook.set_requires_grad', 'nethook.set_requires_grad', (['(True)', 'module'], {}), '(True, module)\n', (7591, 7605), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((9329, 9349), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (9342, 9349), False, 'import copy\n'), ((9354, 9400), 'netdissect.nethook.set_requires_grad', 'nethook.set_requires_grad', (['(False)', 'cloned_model'], {}), '(False, cloned_model)\n', (9379, 9400), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((9414, 9453), 'netdissect.nethook.get_module', 'nethook.get_module', (['cloned_model', 'layer'], {}), '(cloned_model, layer)\n', (9432, 9453), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((10294, 10540), 'netdissect.tally.tally_extremek_and_quantile', 'tally.tally_extremek_and_quantile', (['compute_samples', 'dataset'], {'k': 'k', 'r': 'r', 'batch_size': 'batch_size', 'num_workers': 'num_workers', 'pin_memory': 'pin_memory', 'sample_size': 'sample_size', 'cachefile': "(f'{cachedir}/{run.name}_exk_rq.npz' if cachedir else None)"}), "(compute_samples, dataset, k=k, r=r,\n batch_size=batch_size, num_workers=num_workers, pin_memory=pin_memory,\n sample_size=sample_size, cachefile=f'{cachedir}/{run.name}_exk_rq.npz' if\n cachedir else None)\n", (10327, 10540), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((10905, 10925), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (10918, 10925), False, 'import copy\n'), ((10939, 10978), 'netdissect.nethook.get_module', 'nethook.get_module', (['cloned_model', 'layer'], {}), '(cloned_model, layer)\n', (10957, 10978), False, 'from 
netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((11030, 11076), 'netdissect.nethook.set_requires_grad', 'nethook.set_requires_grad', (['(False)', 'cloned_model'], {}), '(False, cloned_model)\n', (11055, 11076), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((11093, 11132), 'netdissect.unravelconv.unravel_left_conv2d', 'unravelconv.unravel_left_conv2d', (['module'], {}), '(module)\n', (11124, 11132), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((11224, 11278), 'netdissect.nethook.replace_module', 'nethook.replace_module', (['cloned_model', 'layer', 'unraveled'], {}), '(cloned_model, layer, unraveled)\n', (11246, 11278), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((11586, 11803), 'netdissect.tally.tally_second_moment', 'tally.tally_second_moment', (['ex_sample', 'dataset'], {'batch_size': 'batch_size', 'num_workers': 'num_workers', 'pin_memory': 'pin_memory', 'sample_size': 'sample_size', 'cachefile': "(f'{cachedir}/input_cov_moment.npz' if cachedir else None)"}), "(ex_sample, dataset, batch_size=batch_size,\n num_workers=num_workers, pin_memory=pin_memory, sample_size=sample_size,\n cachefile=f'{cachedir}/input_cov_moment.npz' if cachedir else None)\n", (11611, 11803), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((13488, 13517), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['[dataset[0]]'], {}), '([dataset[0]])\n', (13503, 13517), False, 'from torch.utils.data.dataloader import default_collate\n'), ((15027, 15065), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['[dataset_with_seg[0]]'], {}), '([dataset_with_seg[0]])\n', (15042, 15065), False, 'from torch.utils.data.dataloader import default_collate\n'), ((16076, 16309), 'netdissect.tally.tally_all_intersection_and_union', 'tally.tally_all_intersection_and_union', (['compute_concept_pair', 
'dataset_with_seg'], {'sample_size': 'sample_size', 'num_workers': 'num_workers', 'pin_memory': 'pin_memory', 'cachefile': "(f'{cachedir}/{neg}{run.name}_iu.npz' if cachedir else None)"}), "(compute_concept_pair,\n dataset_with_seg, sample_size=sample_size, num_workers=num_workers,\n pin_memory=pin_memory, cachefile=f'{cachedir}/{neg}{run.name}_iu.npz' if\n cachedir else None)\n", (16114, 16309), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((16710, 16748), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['[dataset_with_seg[0]]'], {}), '([dataset_with_seg[0]])\n', (16725, 16748), False, 'from torch.utils.data.dataloader import default_collate\n'), ((18104, 18170), 'netdissect.tally.gather_topk', 'tally.gather_topk', (['collate_unit_iou', 'dataset_with_seg', 'topk'], {'k': '(100)'}), '(collate_unit_iou, dataset_with_seg, topk, k=100)\n', (18121, 18170), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((18927, 18947), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (18940, 18947), False, 'import copy\n'), ((18952, 18998), 'netdissect.nethook.set_requires_grad', 'nethook.set_requires_grad', (['(False)', 'cloned_model'], {}), '(False, cloned_model)\n', (18977, 18998), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((19012, 19051), 'netdissect.nethook.get_module', 'nethook.get_module', (['cloned_model', 'layer'], {}), '(cloned_model, layer)\n', (19030, 19051), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((19429, 19620), 'torch.nn.Conv2d', 'torch.nn.Conv2d', ([], {'in_channels': 'module.in_channels', 'out_channels': 'module.out_channels', 'kernel_size': 'ks', 'padding': 'module.padding', 'dilation': 'module.dilation', 'stride': 'module.stride', 'bias': '(False)'}), '(in_channels=module.in_channels, out_channels=module.\n out_channels, kernel_size=ks, padding=module.padding, dilation=module.\n 
dilation, stride=module.stride, bias=False)\n', (19444, 19620), False, 'import torch\n'), ((19664, 19706), 'netdissect.nethook.set_requires_grad', 'nethook.set_requires_grad', (['(False)', 'unfolder'], {}), '(False, unfolder)\n', (19689, 19706), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((22481, 22723), 'netdissect.tally.tally_extremek_and_quantile', 'tally.tally_extremek_and_quantile', (['compute_samples', 'dataset'], {'k': 'k', 'r': 'r', 'batch_size': 'batch_size', 'num_workers': 'num_workers', 'pin_memory': 'pin_memory', 'sample_size': 'sample_size', 'cachefile': "(f'{cachedir}/swgrad_exk_rq.npz' if cachedir else None)"}), "(compute_samples, dataset, k=k, r=r,\n batch_size=batch_size, num_workers=num_workers, pin_memory=pin_memory,\n sample_size=sample_size, cachefile=f'{cachedir}/swgrad_exk_rq.npz' if\n cachedir else None)\n", (22514, 22723), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((24159, 24366), 'netdissect.tally.tally_mean', 'tally.tally_mean', (['compute_samples', 'dataset'], {'batch_size': 'batch_size', 'num_workers': 'num_workers', 'pin_memory': 'pin_memory', 'sample_size': 'sample_size', 'cachefile': "(f'{cachedir}/sep_stats.npz' if cachedir else None)"}), "(compute_samples, dataset, batch_size=batch_size,\n num_workers=num_workers, pin_memory=pin_memory, sample_size=sample_size,\n cachefile=f'{cachedir}/sep_stats.npz' if cachedir else None)\n", (24175, 24366), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((4434, 4466), 'netdissect.nethook.get_module', 'nethook.get_module', (['model', 'layer'], {}), '(model, layer)\n', (4452, 4466), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((5965, 6004), 'netdissect.nethook.get_module', 'nethook.get_module', (['cloned_model', 'layer'], {}), '(cloned_model, layer)\n', (5983, 6004), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), 
((8025, 8261), 'netdissect.tally.tally_each', 'tally.tally_each', (['accumulate_grad', 'dataset'], {'summarize': 'weight_grad', 'batch_size': 'batch_size', 'num_workers': 'num_workers', 'pin_memory': 'pin_memory', 'sample_size': 'sample_size', 'cachefile': "(f'{cachedir}/weight_grad.npz' if cachedir else None)"}), "(accumulate_grad, dataset, summarize=weight_grad,\n batch_size=batch_size, num_workers=num_workers, pin_memory=pin_memory,\n sample_size=sample_size, cachefile=f'{cachedir}/weight_grad.npz' if\n cachedir else None)\n", (8041, 8261), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((9555, 9570), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9568, 9570), False, 'import torch\n'), ((15375, 15411), 'netdissect.upsample.upsampler', 'upsample.upsampler', (['seg_size', 'r_size'], {}), '(seg_size, r_size)\n', (15393, 15411), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((15640, 15750), 'torch.zeros', 'torch.zeros', (['seg.shape[0]', 'num_seglabels', 'seg.shape[2]', 'seg.shape[3]'], {'dtype': 'torch.bool', 'device': 'seg.device'}), '(seg.shape[0], num_seglabels, seg.shape[2], seg.shape[3], dtype=\n torch.bool, device=seg.device)\n', (15651, 15750), False, 'import torch\n'), ((17092, 17128), 'netdissect.upsample.upsampler', 'upsample.upsampler', (['seg_size', 'r_size'], {}), '(seg_size, r_size)\n', (17110, 17128), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((17462, 17572), 'torch.zeros', 'torch.zeros', (['seg.shape[0]', 'num_seglabels', 'seg.shape[2]', 'seg.shape[3]'], {'dtype': 'torch.bool', 'device': 'seg.device'}), '(seg.shape[0], num_seglabels, seg.shape[2], seg.shape[3], dtype=\n torch.bool, device=seg.device)\n', (17473, 17572), False, 'import torch\n'), ((18756, 18788), 'netdissect.nethook.get_module', 'nethook.get_module', (['model', 'layer'], {}), '(model, layer)\n', (18774, 18788), False, 'from netdissect import nethook, imgviz, tally, 
unravelconv, upsample\n'), ((23208, 23240), 'netdissect.nethook.get_module', 'nethook.get_module', (['model', 'layer'], {}), '(model, layer)\n', (23226, 23240), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((23562, 23604), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (23589, 23604), False, 'import torch\n'), ((23617, 23636), 'torch.zeros_like', 'torch.zeros_like', (['p'], {}), '(p)\n', (23633, 23636), False, 'import torch\n'), ((4622, 4654), 'netdissect.nethook.Trace', 'nethook.Trace', (['module'], {'stop': '(True)'}), '(module, stop=True)\n', (4635, 4654), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((4663, 4678), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4676, 4678), False, 'import torch\n'), ((6177, 6216), 'netdissect.nethook.Trace', 'nethook.Trace', (['module'], {'retain_grad': '(True)'}), '(module, retain_grad=True)\n', (6190, 6216), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((6243, 6262), 'torch.enable_grad', 'torch.enable_grad', ([], {}), '()\n', (6260, 6262), False, 'import torch\n'), ((7750, 7769), 'torch.enable_grad', 'torch.enable_grad', ([], {}), '()\n', (7767, 7769), False, 'import torch\n'), ((9750, 9782), 'netdissect.nethook.Trace', 'nethook.Trace', (['module'], {'stop': '(True)'}), '(module, stop=True)\n', (9763, 9782), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((9791, 9806), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9804, 9806), False, 'import torch\n'), ((11347, 11378), 'netdissect.nethook.Trace', 'nethook.Trace', (['tconv'], {'stop': '(True)'}), '(tconv, stop=True)\n', (11360, 11378), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((17149, 17186), 'torch.zeros', 'torch.zeros', (['num_units', 'num_seglabels'], {}), '(num_units, num_seglabels)\n', (17160, 17186), False, 
'import torch\n'), ((17211, 17248), 'torch.zeros', 'torch.zeros', (['num_units', 'num_seglabels'], {}), '(num_units, num_seglabels)\n', (17222, 17248), False, 'import torch\n'), ((19823, 19881), 'netdissect.nethook.Trace', 'nethook.Trace', (['module'], {'retain_grad': '(True)', 'retain_input': '(True)'}), '(module, retain_grad=True, retain_input=True)\n', (19836, 19881), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((19908, 19927), 'torch.enable_grad', 'torch.enable_grad', ([], {}), '()\n', (19925, 19927), False, 'import torch\n'), ((23404, 23425), 'netdissect.nethook.Trace', 'nethook.Trace', (['module'], {}), '(module)\n', (23417, 23425), False, 'from netdissect import nethook, imgviz, tally, unravelconv, upsample\n'), ((23434, 23449), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (23447, 23449), False, 'import torch\n'), ((23838, 23880), 'torch.ones', 'torch.ones', (['err.shape[0]', '(1)'], {'device': 'device'}), '(err.shape[0], 1, device=device)\n', (23848, 23880), False, 'import torch\n')] |
from math import floor, log


def n(x):
    """Return 2 when x is even and 1 when x is odd (a 1/2-valued parity flag)."""
    return (x + 1) % 2 + 1


def s(x):
    """Return floor(log2(x + 1)).

    NOTE(review): uses float math; for very large x this can be off by one
    where log() rounds. (x + 1).bit_length() - 1 would be exact, but is not
    substituted here so the original behaviour is preserved.
    """
    return floor(log(x + 1, 2))


def r(x):
    """Return x//2 - n(x) + 1 + n(x) * 2**(s(x) - 1).

    NOTE(review): for x == 0 the exponent s(x) - 1 is -1, so 2**-1 makes the
    result a float (0.0); other small inputs yield ints.
    """
    return x // 2 - n(x) + 1 + n(x) * 2 ** (s(x) - 1)


def pn(p):
    """Return r(p), corrected to r(r(p)) when p is even.

    Presumably a parent/partner-node index in an implicit binary-tree
    numbering -- TODO confirm against the caller.
    """
    # (n(p) == 2) is a bool used as 0/1: the correction applies only to even p.
    return r(p) + (n(p) == 2) * (r(r(p)) - r(p))


def dn(p, d):
    """Return (d + [p,d both even] * n(r(p)) * 2**s(d) - [p odd]) // (3 - n(p)).

    The bracketed conditions are bools used as 0/1; the divisor 3 - n(p) is
    1 for even p and 2 for odd p.
    """
    return (d + (n(p) == n(d) == 2) * (n(r(p)) * 2 ** s(d)) - (n(p) == 1)) // (3 - n(p))
| [
"math.log"
] | [((82, 95), 'math.log', 'log', (['(x + 1)', '(2)'], {}), '(x + 1, 2)\n', (85, 95), False, 'from math import floor, log\n')] |
# Deprecated compatibility shim: keeps the old "evidently.profile_sections"
# import path alive by re-exporting everything from
# evidently.model_profile.sections.
import warnings
import evidently.model_profile.sections
from evidently.model_profile.sections import *
# Mirror the real package's search path so submodule imports under the old
# name continue to resolve.
__path__ = evidently.model_profile.sections.__path__ # type: ignore
# Emitted once per process on first import of this module.
warnings.warn("'import evidently.profile_sections' is deprecated, use 'import evidently.model_profile.sections'")
| [
"warnings.warn"
] | [((175, 298), 'warnings.warn', 'warnings.warn', (['"""\'import evidently.profile_sections\' is deprecated, use \'import evidently.model_profile.sections\'"""'], {}), '(\n "\'import evidently.profile_sections\' is deprecated, use \'import evidently.model_profile.sections\'"\n )\n', (188, 298), False, 'import warnings\n')] |
import functools
from .base_classes import Container, Void
class BaseURL(Void):
    """Represents the void HTML `<base>` element.

    `<base>` declares the base URL against which every relative URL in the
    document is resolved; a document may contain at most one such element.
    """

    tag = "base"
    __slots__ = ()


# Short alias matching the element's tag name.
Base = BaseURL
class ExternalResourceLink(Void):
    """Represents the void HTML `<link>` element.

    `<link>` declares a relationship between the current document and an
    external resource -- most commonly a stylesheet, but also site icons
    (both "favicon"-style icons and home-screen/app icons on mobile
    devices) and other linked assets.
    """

    tag = "link"
    __slots__ = ()


# Short alias matching the element's tag name.
Link = ExternalResourceLink

# Convenience factory producing a <link rel="stylesheet"> element.
ExternalStyleSheet = functools.partial(ExternalResourceLink, rel="stylesheet")
class Meta(Void):
    """Represents the void HTML `<meta>` element.

    `<meta>` carries document metadata that cannot be expressed by the
    other meta-related elements (`<base>`, `<link>`, `<script>`,
    `<style>`, `<title>`).
    """

    tag = "meta"
    __slots__ = ()
class Style(Container):
    """Represents the HTML `<style>` container element.

    Holds style information applying to the whole document or to a part
    of it.
    """

    tag = "style"
    __slots__ = ()
class Title(Container):
    """Represents the HTML `<title>` container element.

    Defines the document's title as shown in the browser's title bar or
    in the page's tab.
    """

    tag = "title"
    __slots__ = ()
| [
"functools.partial"
] | [((805, 862), 'functools.partial', 'functools.partial', (['ExternalResourceLink'], {'rel': '"""stylesheet"""'}), "(ExternalResourceLink, rel='stylesheet')\n", (822, 862), False, 'import functools\n')] |
# !/usr/bin/env python3
"""Importing"""
# Importing Inbuilt packages
from re import match
from shutil import rmtree
from uuid import uuid4
from os import makedirs
# Importing Credentials & Developer defined modules
from helper.downloader.urlDL import UrlDown
from helper.downloader.tgDL import TgDown
from helper.downloader.ytDL import YTDown
from botModule.botMSG import BotMessage
# Importing Credentials & Required Data
try:
from testexp.config import Config
except ModuleNotFoundError:
from config import Config
"""Downloader Class"""
class Downloader:
    """Routes an incoming bot message to the matching download backend.

    Telegram media goes to TgDown, YouTube links are recognised but
    rejected, and every other URL is handed to UrlDown.  Each instance
    works in its own unique folder under Config.DOWNLOAD_LOCATION.
    """

    def __init__(self, bot, msg, log_obj):
        self.bot = bot
        self.msg = msg
        self.log_obj = log_obj
        # NOTE(review): a doubled forward slash is used as the separator for
        # POSIX-style locations; redundant slashes collapse, so the path
        # still resolves -- confirm this is intentional.
        separator = '//' if '/' in Config.DOWNLOAD_LOCATION else '\\'
        self.Downloadfolder = f"{Config.DOWNLOAD_LOCATION}{separator}{uuid4()}{separator}"
        makedirs(self.Downloadfolder)

    async def start(self):
        """Dispatch the message to the right downloader and return self."""
        if self.msg.media:  # Telegram file/media upload
            self.process_msg = await self.msg.reply_text(
                BotMessage.processing_file, parse_mode='html')
            await self.file_downloader()
            return self
        self.url = self.msg.text
        self.process_msg = await self.msg.reply_text(
            BotMessage.processing_url, parse_mode='html')
        if match('^https://(www.)?youtu(.)?be(.com)?/(.*)', self.url):
            # YouTube links are detected but not handled yet.
            await self.msg.reply_text("Currently not supporting Youtube Videos.")
            await self.process_msg.delete()
            # await self.youtube_downloader()
        else:
            await self.url_downloader()
        return self

    async def youtube_downloader(self):
        """Download a YouTube video via YTDown (YTDown manages its own paths)."""
        rmtree(self.Downloadfolder, ignore_errors=True)
        downloader = YTDown(self.bot, self.msg, self.process_msg, self.url, self.log_obj)
        await downloader.start()
        self.filename = None

    async def url_downloader(self):
        """Download a plain URL via UrlDown; record filename and notice message."""
        downloader = UrlDown(self.bot, self.msg, self.process_msg,
                           self.Downloadfolder, self.url)
        await downloader.start()
        self.filename = downloader.filename
        if self.filename:
            self.n_msg = downloader.n_msg

    async def file_downloader(self):
        """Download a Telegram file/media via TgDown; record filename and notice."""
        downloader = TgDown(self.bot, self.msg, self.process_msg, self.Downloadfolder)
        await downloader.start()
        self.filename = downloader.filename
        if self.filename:
            self.n_msg = downloader.n_msg
| [
"helper.downloader.tgDL.TgDown",
"os.makedirs",
"re.match",
"uuid.uuid4",
"helper.downloader.ytDL.YTDown",
"shutil.rmtree",
"helper.downloader.urlDL.UrlDown"
] | [((852, 881), 'os.makedirs', 'makedirs', (['self.Downloadfolder'], {}), '(self.Downloadfolder)\n', (860, 881), False, 'from os import makedirs\n'), ((1726, 1773), 'shutil.rmtree', 'rmtree', (['self.Downloadfolder'], {'ignore_errors': '(True)'}), '(self.Downloadfolder, ignore_errors=True)\n', (1732, 1773), False, 'from shutil import rmtree\n'), ((1791, 1859), 'helper.downloader.ytDL.YTDown', 'YTDown', (['self.bot', 'self.msg', 'self.process_msg', 'self.url', 'self.log_obj'], {}), '(self.bot, self.msg, self.process_msg, self.url, self.log_obj)\n', (1797, 1859), False, 'from helper.downloader.ytDL import YTDown\n'), ((2010, 2086), 'helper.downloader.urlDL.UrlDown', 'UrlDown', (['self.bot', 'self.msg', 'self.process_msg', 'self.Downloadfolder', 'self.url'], {}), '(self.bot, self.msg, self.process_msg, self.Downloadfolder, self.url)\n', (2017, 2086), False, 'from helper.downloader.urlDL import UrlDown\n'), ((2347, 2412), 'helper.downloader.tgDL.TgDown', 'TgDown', (['self.bot', 'self.msg', 'self.process_msg', 'self.Downloadfolder'], {}), '(self.bot, self.msg, self.process_msg, self.Downloadfolder)\n', (2353, 2412), False, 'from helper.downloader.tgDL import TgDown\n'), ((1285, 1343), 're.match', 'match', (['"""^https://(www.)?youtu(.)?be(.com)?/(.*)"""', 'self.url'], {}), "('^https://(www.)?youtu(.)?be(.com)?/(.*)', self.url)\n", (1290, 1343), False, 'from re import match\n'), ((827, 834), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (832, 834), False, 'from uuid import uuid4\n')] |
# -*- coding: utf-8 -*-
"""
Functionality for binding wx control label shortcut keys to events
automatically. In wx, a button with a label "E&xit" would be displayed as
having the label "Exit" with "x" underlined, indicating a keyboard shortcut,
but wx does not bind these shortcuts automatically, requiring constructing
the acceleration table piecemeal.
Supported controls:
- wx.Button click handler called
- wx.CheckBox value is reversed, control focused, change handler called
- wx.TextCtrl control focused, all text selected
- wx.RadioButton control focused, value selected
- wx.Control control focused
- wx.ToolBar tool event is called, if the tool shorthelp includes a
parseable shortcut key like (Alt-S)
- wx.ToggleButton ToggleButton handler called
Uses primitive heuristic analysis to detect connected label-control pairs:
- wx.StaticTexts whose next sibling is a focusable control
- wx.StaticTexts that have an Id one less from a focusable control (created
immediately before creating the control)
- wx.StaticTexts that have the same Name as a control with "label" appended or
prepended,
e.g. "iptext" and "iptext_label"|"iptext.label"|"iptext label"|"labeliptext"
------------------------------------------------------------------------------
This file is part of Skyperious - a Skype database viewer and merger.
Released under the MIT License.
@author <NAME>
@created 19.11.2011
@modified 09.03.2015
------------------------------------------------------------------------------
"""
import functools
import re
import wx
DEBUG = False
class AutoAcceleratorMixIn(object):
    """
    A windowed control that assigns global keyboard shortcuts to all its
    controls that have a shortcut key defined in their label (e.g. a button
    labeled "E&xit" gets assigned the shortcut Alt-X).

    Accelerator table is autocreated on first showing; if changing controls
    afterwards, call UpdateAccelerators().

    @param   use_heuristics  whether to use heuristic analysis to detect
                             connected label-control pairs
    """

    def __init__(self, use_heuristics=True):
        """
        @param   use_heuristics  whether to use heuristic analysis to detect
                                 connected label-control pairs
        """
        self.__use_heuristics = use_heuristics
        self.__shortcuts = None # {shortcut char: target control, }

    def Show(self, *args, **kwargs):
        """
        Initializes the shortcut keys from child controls, if not already
        created, and calls parent.Show.
        """
        # BUG FIX: name mangling rewrites self.__shortcuts to
        # self._AutoAcceleratorMixIn__shortcuts, but leaves string literals
        # alone, so the original hasattr(self, "__shortcuts") was always
        # False -- the accelerator table was rebuilt (with heuristics forced
        # back to True) on *every* Show() call.  Check the mangled name
        # explicitly; the attributes may genuinely be missing when the
        # subclass never ran this mixin's __init__.
        if not hasattr(self, "_AutoAcceleratorMixIn__shortcuts"):
            self.__shortcuts = None # {shortcut char: target control, }
        if not hasattr(self, "_AutoAcceleratorMixIn__use_heuristics"):
            self.__use_heuristics = True
        if self.__shortcuts is None:
            # Pass the stored flag instead of relying on the default, so a
            # use_heuristics=False given to __init__ is not silently reset.
            self.UpdateAccelerators(self.__use_heuristics)
        super(AutoAcceleratorMixIn, self).Show(*args, **kwargs)

    def UpdateAccelerators(self, use_heuristics=True):
        """
        Rebuilds the control shortcut keys in this frame.

        @param   use_heuristics  whether to use heuristic analysis to detect
                                 connected label-control pairs (sticky)
        """
        # Unconditional assignment makes the original dead hasattr() guard
        # unnecessary here.
        self.__use_heuristics = use_heuristics
        self.__shortcuts = accelerate(self, self.__use_heuristics)
def collect_shortcuts(control, use_heuristics=True):
    """
    Returns a map of detected shortcut keys and target controls under the
    specified control.

    @param   control          the control to start from
    @param   use_heuristics   whether to use heuristic analysis to detect
                              connected label-control pairs
    @return                   a map of detected shortcut chars and a list of
                              their target controls (there can be several
                              controls with one shortcut, e.g. controls on
                              different pages of a Notebook)
    """
    result = {} # {char: [target controls], }
    nameds = {} # collected controls with Name {name: control, }
    statics = {} # collected StaticTexts with a shortcut {control: char, }
    def parse_shortcuts(ctrl):
        """
        Parses the shortcut keys from the control label, if any.

        @return  [keys]
        """
        result = []
        # wx.TextCtrl.Label is the same as its value, so must not use that
        if isinstance(ctrl, wx.ToolBar):
            # Toolbar shortcuts are declared in tool shorthelp texts as
            # "(Alt-X)", not in labels.
            toolsmap = dict()
            for i in range(ctrl.GetToolsCount() + 1):
                # wx 2.8 has no functionality for getting tools by index, so
                # need to gather them by layout position
                try:
                    tool = ctrl.FindToolForPosition(i * ctrl.ToolSize[0], 0)
                    toolsmap[repr(tool)] = tool
                except Exception: pass # FindTool not implemented in GTK
            for tool in filter(None, toolsmap.values()):
                text = ctrl.GetToolShortHelp(tool.GetId())
                parts = re.split("\\(Alt-(.)\\)", text, maxsplit=1)
                if len(parts) > 1:
                    result.append(parts[1].lower())
        elif hasattr(ctrl, "Label") and not isinstance(ctrl, wx.TextCtrl):
            for part in filter(len, ctrl.Label.split("&")[1:]):
                # Labels have potentially multiple ampersands - find one that
                # is usable (preceding a valid character. 32 and lower are
                # spaces, punctuation, control characters, etc).
                key = part[0].lower()
                if ord(key) > 32:
                    result.append(key)
                    if (DEBUG) and key:
                        print("Parsed '%s' in label '%s'." % (key, ctrl.Label))
                    break # break for part in filter
        return result
    def collect_recurse(ctrl, result, nameds, statics):
        """
        Goes through the control and all its children and collects accelerated
        controls.

        @return  {key: control, }
        """
        if hasattr(ctrl, "GetChildren"):
            children = ctrl.GetChildren()
            for i in range(len(children)):
                collect_recurse(children[i], result, nameds, statics)
        keys = parse_shortcuts(ctrl)
        for key in keys:
            if isinstance(ctrl, wx.StaticText):
                # StaticTexts cannot take focus themselves; their shortcut is
                # resolved to a nearby control in the heuristic passes below.
                statics[ctrl] = key
            else:
                if key not in result:
                    result[key] = []
                if ctrl not in result[key]:
                    result[key].append(ctrl)
                if (DEBUG): print("Selected '%s' for '%s' (%s.Id=%s)." %
                                  (key, ctrl.Label, ctrl.ClassName,
                                   ctrl.GetId()))
        if ctrl.Name:
            if DEBUG: print("Found named control %s %s." % (ctrl.Name, ctrl))
            nameds[ctrl.Name] = ctrl
    collect_recurse(control, result, nameds, statics)
    # Flat list of every control already assigned to some shortcut, so the
    # heuristic passes below never double-assign a control.
    result_values = [j for i in result.values() for j in i]
    if use_heuristics:
        # Pass 1: pair each labelled wx.StaticText with a nearby focusable
        # control (next sibling, adjacent ID, or next item in the sizer).
        for ctrl, key in statics.items():
            # For wx.StaticTexts, see if the next sibling, or control with the
            # next ID, or control sitting next in the sizer is focusable -
            # shortcut will set focus to the control.
            chosen = None
            next_sibling = hasattr(ctrl, "GetNextSibling") \
                           and ctrl.GetNextSibling()
            # Do not include buttons, as buttons have their own shortcut keys.
            if next_sibling and not isinstance(next_sibling, wx.Button) \
            and (not next_sibling.Enabled or next_sibling.AcceptsFocus()
                 or getattr(next_sibling, "CanAcceptFocus", lambda: False)()):
                chosen = next_sibling
                if (DEBUG):
                    print("Selected '%s' by previous sibling wxStaticText "
                          "'%s' (%s.ID=%s)." %
                          (key, ctrl.Label, chosen.ClassName, chosen.Id))
            if not chosen:
                # Try to see if the item with the next ID is focusable.
                # NOTE(review): comment says "next ID" but the lookup uses
                # ctrl.Id - 1 -- confirm which direction wx assigns IDs here.
                next_ctrl = wx.FindWindowById(ctrl.Id - 1)
                # Disabled controls might return False for AcceptsFocus().
                if next_ctrl and not isinstance(next_ctrl, wx.Button) \
                and (not next_ctrl.Enabled or next_ctrl.AcceptsFocus()
                     or getattr(next_ctrl, "CanAcceptFocus", lambda: False)()):
                    chosen = next_ctrl
                    if (DEBUG):
                        print("Selected '%s' by previous ID wxStaticText "
                              "'%s' (%s.ID=%s)." %
                              (key, ctrl.Label, chosen.ClassName, chosen.Id))
            if not chosen and ctrl.ContainingSizer:
                # Try to see if the item next in the same sizer is focusable
                sizer_items = []
                while True:
                    try:
                        item = ctrl.ContainingSizer.GetItem(len(sizer_items))
                        sizer_items.append(item.Window)
                    except Exception:
                        break # Reached item limit
                index = sizer_items.index(ctrl)
                if index < len(sizer_items) - 1:
                    next_ctrl = sizer_items[index + 1]
                    if (next_ctrl and not isinstance(next_ctrl, wx.Button)
                    and (not next_ctrl.Enabled or next_ctrl.AcceptsFocus()
                         or getattr(next_ctrl, "CanAcceptFocus", lambda: False)())):
                        chosen = next_ctrl
                        if (DEBUG):
                            print("Selected '%s' by previous in sizer "
                                  "wxStaticText '%s' (%s.ID=%s)." %
                                  (key, ctrl.Label, chosen.ClassName, chosen.Id))
            if chosen and chosen not in result_values:
                if key not in result:
                    result[key] = []
                result[key].append(chosen)
                result_values.append(chosen)
        # Pass 2: pair each named control with a "label"-suffixed/prefixed
        # twin (e.g. "iptext" with "iptext_label") carrying the shortcut.
        for name, ctrl in nameds.items():
            # For named controls, see if there is another control with the same
            # name, but "label" appended or prepended.
            if (DEBUG): print("Going through named %s '%s'." % (ctrl, name))
            match_found = False
            label_regex = re.compile("(^label[_ \\.]*%s$)|(^%s[_ \\.]*label$)"
                                     % tuple([name] * 2), re.IGNORECASE)
            for potential_name, potential in nameds.items():
                if label_regex.match(potential_name):
                    keys = parse_shortcuts(potential)
                    for key in keys:
                        if (DEBUG):
                            print("Name %s matches potential %s, key=%s." % (
                                  name, potential_name, key))
                        if key and (ctrl not in result_values):
                            match_found = True
                            if key not in result:
                                result[key] = []
                            if ctrl not in result[key]:
                                result[key].append(ctrl)
                            result_values.append(ctrl)
                            if (DEBUG):
                                print("Selected '%s' by named StaticText "
                                      "'%s' (%s.ID=%s, %s.Name=%s, "
                                      "wxStaticText.Name=%s)." %
                                      (key, potential.Label, ctrl.ClassName,
                                       ctrl.ClassName, ctrl.Id, ctrl.Name,
                                       potential.Name))
                            break # break for key in keys
                if match_found:
                    break # break for potential_name, potential in nameds
    return result
def accelerate(window, use_heuristics=True):
    """
    Assigns global keyboard shortcuts to all controls under the specified
    wx.Window that have a shortcut key defined in their label (e.g. a button
    labeled "E&xit" gets assigned the shortcut Alt-X). Resets previously
    set accelerators, if any.

    @param   window           the wx.Window instance to process, gets its
                              accelerator table reset
    @param   use_heuristics   whether to use heuristic analysis to detect
                              connected label-control pairs
    @return                   a map of detected shortcut chars and their target
                              controls
    """
    def shortcut_handler(targets, key, shortcut_event):
        """
        Shortcut event handler, calls the appropriate event on the target.

        @param   targets         list of target controls. If there is more than
                                 one target control, the first non-disabled
                                 and visible is chosen.
        @param   key             the event shortcut key, like 's'
        @param   shortcut_event  menu event generated by the accelerator table
        """
        if (DEBUG):
            print("Handling target %s" %
                  [(type(t), t.Id, t.Label) for t in targets])
        event = None
        for target in targets:
            if (isinstance(target, wx.Control) # has not been destroyed
            and target.IsShownOnScreen() # visible on current panel
            and target.Enabled):
                if isinstance(target, wx.Button):
                    # Buttons do not get focus on shortcuts by convention
                    event = wx.CommandEvent(wx.EVT_BUTTON.typeId, target.Id)
                    event.SetEventObject(target)
                elif isinstance(target, wx.ToggleButton):
                    # Buttons do not get focus on shortcuts by convention
                    event = wx.CommandEvent(wx.EVT_TOGGLEBUTTON.typeId,
                                            target.Id)
                    event.SetEventObject(target)
                    # Need to change value, as event goes directly to handler
                    target.Value = not target.Value
                elif isinstance(target, wx.CheckBox):
                    event = wx.CommandEvent(wx.EVT_CHECKBOX.typeId, target.Id)
                    # Need to change value, as event goes directly to handler
                    target.Value = not target.Value
                    target.SetFocus()
                elif isinstance(target, wx.ToolBar):
                    # Toolbar shortcuts are defined in their shorthelp texts
                    toolsmap, tb = dict(), target
                    for i in range(tb.GetToolsCount() + 1):
                        # Gather tools by layout position; no index access.
                        try:
                            tool = tb.FindToolForPosition(i * tb.ToolSize[0], 0)
                            toolsmap[repr(tool)] = tool
                        except Exception: pass # FindTool not implemented in GTK
                    for tool in filter(None, toolsmap.values()):
                        id = tool.GetId()
                        text = tb.GetToolShortHelp(id)
                        parts = re.split("\\(Alt-(%s)\\)" % key, text,
                                         maxsplit=1, flags=re.IGNORECASE)
                        if len(parts) > 1:
                            event = wx.CommandEvent(wx.EVT_TOOL.typeId, id)
                            event.SetEventObject(target)
                            target.ToggleTool(id, not target.GetToolState(id))
                            break # break for i in range(target.GetToolsCount)
                else:
                    # Generic focusable control: just focus it (and select
                    # the text for text fields).
                    target.SetFocus()
                    if isinstance(target, wx.TextCtrl):
                        target.SelectAll()
                break # break for target in targets
        if event:
            if (DEBUG): print("Chose target %s." % (target.Label or target))
            wx.PostEvent(target.GetEventHandler(), event)
        else:
            shortcut_event.Skip(True) # Not handled by us: propagate
    # No name mangling happens here (module-level function), so this literal
    # attribute name matches the assignment made at the end of this function.
    if hasattr(window, "__ampersand_shortcut_menu"):
        # Remove previously created menu, if any
        for menu_item in window.__ampersand_shortcut_menu.MenuItems:
            if (DEBUG): print("Removing dummy menu item '%s'" % menu_item.Label)
            window.Unbind(wx.EVT_MENU, menu_item)
        del window.__ampersand_shortcut_menu
    shortcuts = collect_shortcuts(window, use_heuristics)
    if shortcuts:
        # Accelerator entries need a menu event to bind to; use a dummy menu
        # that is never attached to a menubar.
        accelerators = []
        dummy_menu = wx.Menu()
        for key, targets in shortcuts.items():
            if (DEBUG): print("Binding %s to targets %s." %
                              (key, [type(t) for t in targets]))
            menu_item = dummy_menu.Append(wx.ID_ANY, text="&%s" % key)
            window.Bind(wx.EVT_MENU,
                        functools.partial(shortcut_handler, targets, key),
                        menu_item)
            accelerators.append((wx.ACCEL_ALT, ord(key), menu_item.Id))
        window.SetAcceleratorTable(wx.AcceleratorTable(accelerators))
        window.__ampersand_shortcut_menu = dummy_menu
    return shortcuts
| [
"re.split",
"wx.FindWindowById",
"wx.AcceleratorTable",
"wx.CommandEvent",
"functools.partial",
"wx.Menu"
] | [((16908, 16917), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (16915, 16917), False, 'import wx\n'), ((17424, 17457), 'wx.AcceleratorTable', 'wx.AcceleratorTable', (['accelerators'], {}), '(accelerators)\n', (17443, 17457), False, 'import wx\n'), ((5205, 5248), 're.split', 're.split', (['"""\\\\(Alt-(.)\\\\)"""', 'text'], {'maxsplit': '(1)'}), "('\\\\(Alt-(.)\\\\)', text, maxsplit=1)\n", (5213, 5248), False, 'import re\n'), ((8367, 8397), 'wx.FindWindowById', 'wx.FindWindowById', (['(ctrl.Id - 1)'], {}), '(ctrl.Id - 1)\n', (8384, 8397), False, 'import wx\n'), ((17228, 17277), 'functools.partial', 'functools.partial', (['shortcut_handler', 'targets', 'key'], {}), '(shortcut_handler, targets, key)\n', (17245, 17277), False, 'import functools\n'), ((13971, 14019), 'wx.CommandEvent', 'wx.CommandEvent', (['wx.EVT_BUTTON.typeId', 'target.Id'], {}), '(wx.EVT_BUTTON.typeId, target.Id)\n', (13986, 14019), False, 'import wx\n'), ((14233, 14287), 'wx.CommandEvent', 'wx.CommandEvent', (['wx.EVT_TOGGLEBUTTON.typeId', 'target.Id'], {}), '(wx.EVT_TOGGLEBUTTON.typeId, target.Id)\n', (14248, 14287), False, 'import wx\n'), ((14599, 14649), 'wx.CommandEvent', 'wx.CommandEvent', (['wx.EVT_CHECKBOX.typeId', 'target.Id'], {}), '(wx.EVT_CHECKBOX.typeId, target.Id)\n', (14614, 14649), False, 'import wx\n'), ((15514, 15585), 're.split', 're.split', (["('\\\\(Alt-(%s)\\\\)' % key)", 'text'], {'maxsplit': '(1)', 'flags': 're.IGNORECASE'}), "('\\\\(Alt-(%s)\\\\)' % key, text, maxsplit=1, flags=re.IGNORECASE)\n", (15522, 15585), False, 'import re\n'), ((15709, 15748), 'wx.CommandEvent', 'wx.CommandEvent', (['wx.EVT_TOOL.typeId', 'id'], {}), '(wx.EVT_TOOL.typeId, id)\n', (15724, 15748), False, 'import wx\n')] |
"""
from https://github.com/PoonLab/MiCall-Lite, which was forked from
https://github.com/cfe-lab/MiCall.
MiCall is distributed under a dual AGPLv3 license.
"""
import sys
import argparse
from csv import DictWriter
from struct import unpack
import csv
import os
from operator import itemgetter
import sys
import math
from itertools import groupby
def read_records(data_file, min_version):
    """ Yield fixed-length records from an Illumina Interop file.

    :param file data_file: an open binary file-like object whose first two
        bytes hold the file version and the length of each record; the
        records follow immediately after.
    :param int min_version: the minimum accepted file version.
    :return: an iterator over raw byte strings, one per record, each exactly
        the record length declared in the header.
    """
    version, record_length = unpack('!BB', data_file.read(2))
    if version < min_version:
        message = 'File version {} is less than minimum version {} in {}.'.format(
            version,
            min_version,
            data_file.name)
        raise IOError(message)
    chunk = data_file.read(record_length)
    while chunk:
        if len(chunk) < record_length:
            # A short read at EOF means the file was truncated mid-record.
            raise IOError('Partial record of length {} found in {}.'.format(
                len(chunk),
                data_file.name))
        yield chunk
        chunk = data_file.read(record_length)
def read_errors(data_file):
    """ Yield phiX error-rate entries from an Illumina error metrics file.

    :param file data_file: an open binary file-like object with a two-byte
        header (file version, record length) followed by the records.
    :return: an iterator of dicts with keys lane, tile, cycle (uint16),
        error_rate (float), and num_0_errors .. num_4_errors (uint32).
    """
    PARSED_LENGTH = 30  # bytes consumed by the '<HHHfLLLLL' layout below
    field_names = ('lane',
                   'tile',
                   'cycle',
                   'error_rate',
                   'num_0_errors',
                   'num_1_error',
                   'num_2_errors',
                   'num_3_errors',
                   'num_4_errors')
    for record in read_records(data_file, min_version=3):
        values = unpack('<HHHfLLLLL', record[:PARSED_LENGTH])
        yield dict(zip(field_names, values))
def _yield_cycles(records, read_lengths):
sorted_records = sorted(map(itemgetter('tile', 'cycle', 'error_rate'),
records))
max_forward_cycle = read_lengths and read_lengths[0] or sys.maxsize
min_reverse_cycle = read_lengths and sum(read_lengths[:-1])+1 or sys.maxsize
for record in sorted_records:
cycle = record[1]
if cycle >= min_reverse_cycle:
cycle = min_reverse_cycle - cycle - 1
elif cycle > max_forward_cycle:
continue
rate = round(record[2], 4)
yield record[0], cycle, rate
def _record_grouper(record):
# Group by tile and sign of cycle (forward or reverse).
return (record[0], int(math.copysign(1, record[1])))
def write_phix_csv(out_file, records, read_lengths=None, summary=None):
    """ Write phiX error rate data to a comma-separated-values file.
    Missing cycles are written with blank error rates, index reads are not
    written, and reverse reads are written with negative cycles.
    :param out_file: an open file to write to
    :param records: a sequence of dictionaries like those yielded from
    read_phix().
    :param read_lengths: a list of lengths for each type of read: forward,
    indexes, and reverse
    :param dict summary: a dictionary to hold the summary values:
    error_rate_fwd and error_rate_rev.
    """
    writer = csv.writer(out_file, lineterminator=os.linesep)
    writer.writerow(['tile', 'cycle', 'errorrate'])
    # Accumulators indexed by direction: [0] = reverse reads, [1] = forward.
    error_sums = [0.0, 0.0]
    error_counts = [0, 0]
    # Each group covers one tile in one direction (sign: +1 forward, -1 reverse).
    for (_tile, sign), group in groupby(_yield_cycles(records, read_lengths),
                                    _record_grouper):
        previous_cycle = 0
        record = None
        for record in group:
            cycle = record[1]
            previous_cycle += sign
            # Pad any skipped cycles with blank error rates.
            while previous_cycle*sign < cycle*sign:
                writer.writerow((record[0], previous_cycle, ''))
                previous_cycle += sign
            writer.writerow(record)
            # Map sign -1/+1 onto summary index 0/1.
            summary_index = (sign+1) // 2
            error_sums[summary_index] += record[2]
            error_counts[summary_index] += 1
        if read_lengths:
            # Pad out to the full read length with blank error rates.
            read_length = read_lengths[0] if sign == 1 else -read_lengths[-1]
            while previous_cycle*sign < read_length*sign:
                previous_cycle += sign
                writer.writerow((record[0], previous_cycle, ''))
    if error_counts[1] > 0 and summary is not None:
        summary['error_rate_fwd'] = error_sums[1]/error_counts[1]
    if error_counts[0] > 0 and summary is not None:
        summary['error_rate_rev'] = error_sums[0]/error_counts[0]
def main():
    """ Command-line entry point: convert an InterOp error file to CSV. """
    parser = argparse.ArgumentParser(
        description='Extract phiX174 error rates from InterOp file')
    parser.add_argument('bin',
                        type=argparse.FileType('rb'),
                        help='ErrorMetricsOut.bin file from run')
    parser.add_argument('output',
                        type=argparse.FileType('w'),
                        help='File to write CSV output')
    parser.add_argument('-l', '--len', type=int, default=151,
                        help='Read length')
    parser.add_argument('-a', type=int, default=8,
                        help='Adapter sequence length, defaults to 8')
    args = parser.parse_args()
    error_records = read_errors(args.bin)
    write_phix_csv(args.output,
                   error_records,
                   [args.len, args.a, args.a, args.len])
# Run as a script: parse arguments and convert the InterOp file to CSV.
if __name__ == '__main__':
    main()
| [
"argparse.FileType",
"argparse.ArgumentParser",
"csv.writer",
"math.copysign",
"struct.unpack",
"operator.itemgetter"
] | [((876, 897), 'struct.unpack', 'unpack', (['"""!BB"""', 'header'], {}), "('!BB', header)\n", (882, 897), False, 'from struct import unpack\n'), ((3948, 3995), 'csv.writer', 'csv.writer', (['out_file'], {'lineterminator': 'os.linesep'}), '(out_file, lineterminator=os.linesep)\n', (3958, 3995), False, 'import csv\n'), ((5243, 5332), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Extract phiX174 error rates from InterOp file"""'}), "(description=\n 'Extract phiX174 error rates from InterOp file')\n", (5266, 5332), False, 'import argparse\n'), ((2156, 2198), 'struct.unpack', 'unpack', (['"""<HHHfLLLLL"""', 'data[:PARSED_LENGTH]'], {}), "('<HHHfLLLLL', data[:PARSED_LENGTH])\n", (2162, 2198), False, 'from struct import unpack\n'), ((2636, 2677), 'operator.itemgetter', 'itemgetter', (['"""tile"""', '"""cycle"""', '"""error_rate"""'], {}), "('tile', 'cycle', 'error_rate')\n", (2646, 2677), False, 'from operator import itemgetter\n'), ((3274, 3301), 'math.copysign', 'math.copysign', (['(1)', 'record[1]'], {}), '(1, record[1])\n', (3287, 3301), False, 'import math\n'), ((5365, 5388), 'argparse.FileType', 'argparse.FileType', (['"""rb"""'], {}), "('rb')\n", (5382, 5388), False, 'import argparse\n'), ((5472, 5494), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {}), "('w')\n", (5489, 5494), False, 'import argparse\n')] |
import numpy as np
import cv2
import glob
import itertools
import os
from tqdm import tqdm
from ..models.config import IMAGE_ORDERING
from .augmentation import augment_seg
import random
random.seed(0)
# One random RGB tuple per class id; the fixed seed above keeps the
# colours stable across runs.
class_colors = [ ( random.randint(0,255),random.randint(0,255),random.randint(0,255)  ) for _ in range(5000)  ]
def get_pairs_from_paths(images_path, segs_path):
    """ Pair every image in images_path with its annotation in segs_path.

    The annotation file name is the image base name with a .png extension.
    NOTE: the original built a `segmentations` lookup dict that was only
    used by a commented-out assert; that dead code has been removed.
    Annotation existence is NOT verified here (the original check was
    disabled), so callers get a pair even when the .png is missing.

    :param images_path: directory containing *.png/*.jpg/*.jpeg images.
    :param segs_path: directory containing *.png annotation images.
    :return: list of (image_path, segmentation_path) tuples.
    """
    images = []
    # Keep the original search order: png, then jpg, then jpeg.
    for pattern in ("*.png", "*.jpg", "*.jpeg"):
        images.extend(glob.glob(os.path.join(images_path, pattern)))
    pairs = []
    for image_path in images:
        seg_name = os.path.basename(image_path)
        seg_name = seg_name.replace(".jpg", ".png").replace(".jpeg", ".png")
        pairs.append((image_path, os.path.join(segs_path, seg_name)))
    return pairs
def get_image_arr(path, width, height, imgNorm="sub_mean", odering='channels_first'):
    """ Load and normalise one input image for the network.

    :param path: image file path, or an already-loaded numpy array
        (presumably BGR, as produced by cv2.imread -- confirm at call site).
    :param width: target width after resizing.
    :param height: target height after resizing.
    :param imgNorm: normalisation mode: "sub_and_divide" scales to [-1, 1];
        "sub_mean" subtracts fixed per-channel constants and reverses the
        channel order; "divide" scales to [0, 1].
    :param odering: 'channels_first' moves the channel axis to the front.
    :return: the normalised image array.
    """
    img = path if type(path) is np.ndarray else cv2.imread(path, 1)
    if imgNorm == "sub_and_divide":
        resized = cv2.resize(img, (width, height))
        img = np.float32(resized) / 127.5 - 1
    elif imgNorm == "sub_mean":
        img = cv2.resize(img, (width, height)).astype(np.float32)
        # Fixed per-channel offsets (Caffe/VGG-style means -- TODO confirm).
        img[:, :, 0] -= 103.939
        img[:, :, 1] -= 116.779
        img[:, :, 2] -= 123.68
        img = img[:, :, ::-1]  # reverse channel order (BGR <-> RGB)
    elif imgNorm == "divide":
        img = cv2.resize(img, (width, height)).astype(np.float32) / 255.0
    if odering == 'channels_first':
        img = np.rollaxis(img, 2, 0)
    return img
def get_segmentation_arr(path, nClasses, width, height, no_reshape=False):
    """ Build a one-hot (height, width, nClasses) label volume from an
    annotation image whose first channel stores the class index per pixel.

    :param path: annotation file path, or an already-loaded numpy array.
    :param nClasses: number of segmentation classes.
    :param width: target width (nearest-neighbour resize keeps class ids).
    :param height: target height.
    :param no_reshape: when True, return the (height, width, nClasses)
        volume; otherwise flatten it to (width*height, nClasses).
    :return: the one-hot label array.
    """
    img = path if type(path) is np.ndarray else cv2.imread(path, 1)
    # INTER_NEAREST avoids interpolating (and thus inventing) class ids.
    img = cv2.resize(img, (width, height), interpolation=cv2.INTER_NEAREST)
    class_map = img[:, :, 0]
    seg_labels = np.zeros((height, width, nClasses))
    for class_id in range(nClasses):
        seg_labels[:, :, class_id] = (class_map == class_id).astype(int)
    if no_reshape:
        return seg_labels
    return np.reshape(seg_labels, (width * height, nClasses))
def verify_segmentation_dataset(images_path, segs_path, n_classes):
    """ Sanity-check every image/annotation pair in the dataset.

    Asserts that each image and its annotation have matching height/width
    and that annotation pixel values stay below n_classes.  Raises
    AssertionError on the first bad pair.
    """
    img_seg_pairs = get_pairs_from_paths(images_path, segs_path)
    assert len(img_seg_pairs) > 0, "Dataset looks empty or path is wrong "
    for im_fn, seg_fn in tqdm(img_seg_pairs):
        img = cv2.imread(im_fn)
        seg = cv2.imread(seg_fn)
        assert img.shape[0] == seg.shape[0] and img.shape[1] == seg.shape[1], (
            "The size of image and the annotation does not match or they are corrupt "
            + im_fn + " " + seg_fn)
        max_pixel = np.max(seg[:, :, 0])
        assert max_pixel < n_classes, (
            "The pixel values of seg image should be from 0 to "
            + str(n_classes - 1) + " . Found pixel value " + str(max_pixel))
    print("Dataset verified! ")
def image_segmentation_generator(images_path, segs_path, batch_size,
                                 n_classes, input_height, input_width,
                                 output_height, output_width,
                                 do_augment=False):
    """ Endless generator of (images, labels) training batches.

    Pairs are shuffled once, then cycled forever; each batch loads
    batch_size images with cv2, optionally augments them, and converts
    them with get_image_arr / get_segmentation_arr.

    :param images_path: directory of input images.
    :param segs_path: directory of annotation images.
    :param batch_size: number of samples per yielded batch.
    :param n_classes: number of segmentation classes.
    :param input_height: network input height.
    :param input_width: network input width.
    :param output_height: network output height.
    :param output_width: network output width.
    :param do_augment: when True, apply augment_seg to image and labels.
    :yield: (X, Y) numpy arrays of length batch_size.
    """
    img_seg_pairs = get_pairs_from_paths(images_path, segs_path)
    random.shuffle(img_seg_pairs)
    zipped = itertools.cycle(img_seg_pairs)
    while True:
        X = []
        Y = []
        for _ in range(batch_size):
            im, seg = next(zipped)
            im = cv2.imread(im, 1)
            seg = cv2.imread(seg, 1)
            if do_augment:
                # BUG FIX: the original passed an undefined name ``img``
                # to augment_seg, raising NameError whenever
                # do_augment=True; the loaded image is ``im``.
                im, seg[:, :, 0] = augment_seg(im, seg[:, :, 0])
            X.append(get_image_arr(im, input_width, input_height,
                                   odering=IMAGE_ORDERING))
            Y.append(get_segmentation_arr(seg, n_classes,
                                          output_width, output_height))
        yield np.array(X), np.array(Y)
| [
"itertools.cycle",
"numpy.reshape",
"random.shuffle",
"tqdm.tqdm",
"os.path.join",
"numpy.rollaxis",
"random.seed",
"numpy.max",
"numpy.array",
"numpy.zeros",
"os.path.basename",
"cv2.resize",
"cv2.imread",
"random.randint"
] | [((189, 203), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (200, 203), False, 'import random\n'), ((1887, 1922), 'numpy.zeros', 'np.zeros', (['(height, width, nClasses)'], {}), '((height, width, nClasses))\n', (1895, 1922), True, 'import numpy as np\n'), ((2020, 2085), 'cv2.resize', 'cv2.resize', (['img', '(width, height)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(img, (width, height), interpolation=cv2.INTER_NEAREST)\n', (2030, 2085), False, 'import cv2\n'), ((2246, 2296), 'numpy.reshape', 'np.reshape', (['seg_labels', '(width * height, nClasses)'], {}), '(seg_labels, (width * height, nClasses))\n', (2256, 2296), True, 'import numpy as np\n'), ((2558, 2577), 'tqdm.tqdm', 'tqdm', (['img_seg_pairs'], {}), '(img_seg_pairs)\n', (2562, 2577), False, 'from tqdm import tqdm\n'), ((3241, 3270), 'random.shuffle', 'random.shuffle', (['img_seg_pairs'], {}), '(img_seg_pairs)\n', (3255, 3270), False, 'import random\n'), ((3283, 3313), 'itertools.cycle', 'itertools.cycle', (['img_seg_pairs'], {}), '(img_seg_pairs)\n', (3298, 3313), False, 'import itertools\n'), ((224, 246), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (238, 246), False, 'import random\n'), ((246, 268), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (260, 268), False, 'import random\n'), ((268, 290), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (282, 290), False, 'import random\n'), ((565, 597), 'os.path.join', 'os.path.join', (['segs_path', '"""*.png"""'], {}), "(segs_path, '*.png')\n", (577, 597), False, 'import os\n'), ((790, 823), 'os.path.join', 'os.path.join', (['segs_path', 'seg_bnme'], {}), '(segs_path, seg_bnme)\n', (802, 823), False, 'import os\n'), ((1262, 1281), 'cv2.imread', 'cv2.imread', (['path', '(1)'], {}), '(path, 1)\n', (1272, 1281), False, 'import cv2\n'), ((1749, 1771), 'numpy.rollaxis', 'np.rollaxis', (['img', '(2)', '(0)'], {}), '(img, 2, 0)\n', (1760, 1771), True, 'import numpy as np\n'), 
((1992, 2011), 'cv2.imread', 'cv2.imread', (['path', '(1)'], {}), '(path, 1)\n', (2002, 2011), False, 'import cv2\n'), ((2588, 2605), 'cv2.imread', 'cv2.imread', (['im_fn'], {}), '(im_fn)\n', (2598, 2605), False, 'import cv2\n'), ((2616, 2634), 'cv2.imread', 'cv2.imread', (['seg_fn'], {}), '(seg_fn)\n', (2626, 2634), False, 'import cv2\n'), ((497, 532), 'os.path.join', 'os.path.join', (['images_path', '"""*.jpeg"""'], {}), "(images_path, '*.jpeg')\n", (509, 532), False, 'import os\n'), ((1421, 1453), 'cv2.resize', 'cv2.resize', (['img', '(width, height)'], {}), '(img, (width, height))\n', (1431, 1453), False, 'import cv2\n'), ((2819, 2839), 'numpy.max', 'np.max', (['seg[:, :, 0]'], {}), '(seg[:, :, 0])\n', (2825, 2839), True, 'import numpy as np\n'), ((3418, 3435), 'cv2.imread', 'cv2.imread', (['im', '(1)'], {}), '(im, 1)\n', (3428, 3435), False, 'import cv2\n'), ((3447, 3465), 'cv2.imread', 'cv2.imread', (['seg', '(1)'], {}), '(seg, 1)\n', (3457, 3465), False, 'import cv2\n'), ((396, 430), 'os.path.join', 'os.path.join', (['images_path', '"""*.png"""'], {}), "(images_path, '*.png')\n", (408, 430), False, 'import os\n'), ((446, 480), 'os.path.join', 'os.path.join', (['images_path', '"""*.jpg"""'], {}), "(images_path, '*.jpg')\n", (458, 480), False, 'import os\n'), ((1622, 1654), 'cv2.resize', 'cv2.resize', (['img', '(width, height)'], {}), '(img, (width, height))\n', (1632, 1654), False, 'import cv2\n'), ((2953, 2973), 'numpy.max', 'np.max', (['seg[:, :, 0]'], {}), '(seg[:, :, 0])\n', (2959, 2973), True, 'import numpy as np\n'), ((3727, 3738), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (3735, 3738), True, 'import numpy as np\n'), ((3741, 3752), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (3749, 3752), True, 'import numpy as np\n'), ((1335, 1367), 'cv2.resize', 'cv2.resize', (['img', '(width, height)'], {}), '(img, (width, height))\n', (1345, 1367), False, 'import cv2\n'), ((710, 730), 'os.path.basename', 'os.path.basename', (['im'], {}), '(im)\n', (726, 
730), False, 'import os\n')] |
# Shipment Details Download script
# Outputs CSV text report for purchased production shipments
#
# Usage:
# python3 ShipmentReport_3x.py "optional API KEY" (if not using env vars)
#
# 0.2 Revised API key display 02 Jan 2020 <EMAIL>
# 0.1 Corrected handling of zero shipments returned 02 Jan 2020 <EMAIL>
# 0.0 Initial version 27 Dec 2019 <EMAIL>
#
# Note: this script makes raw endpoint queries instead of using the easypost
# API Python modules to limit the amount of dependencies that are required
#############################################################################
# Copyright (C) 2019 by EasyPost, Inc. <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for
# any purpose with or without fee is hereby granted.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
# AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
# PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#############################################################################
import csv
import json
import os
import sys
from base64 import b64encode
from datetime import datetime
from http.client import HTTPSConnection
from urllib.parse import urlencode
# Name of the environment variable that stores our production API key;
# set to "" if not used.
ENV_VAR_API_KEY = ""
# output folder for generated location
# defaults to ~/Documents (Linux/MacOS) or C:\\Users\\<CURRENT_USER_NAME>\\Documents (Windows)
# hard-code to some other path if desired
OUTPUT_FOLDER = os.path.join(os.path.expanduser('~'), 'Documents')
# modify startDate below to suit
startDate = "2019-01-01T00:00:00Z"
# End of the query window: "now" in UTC, captured once at import time.
endDate = datetime.utcnow().isoformat()
# Common path prefix for all EasyPost API v2 endpoints.
URLBASE = "/v2/"
def getURL(api_key, url, list_data):
    """ Issue an authenticated GET to the EasyPost API and return the JSON.

    :param api_key: production API key, sent via HTTP basic auth.
    :param url: endpoint path appended to URLBASE (e.g. 'shipments').
    :param list_data: list of 'name=value' strings sent as query params.
    :return: the decoded JSON object, or {} when the request or the JSON
        decoding fails (best-effort: callers treat {} as "no data").

    inspired by https://stackoverflow.com/a/7000784
    """
    # create our connection
    conn = HTTPSConnection("api.easypost.com")
    # build our authentication header: base64("<key>:")
    b64userpassword = b64encode(bytes(api_key + ":", encoding='ascii')).decode("ascii")
    headers = {
        'Authorization': 'Basic %s' % b64userpassword,
        'Content-type': 'application/x-www-form-urlencoded',
        'Accept': 'text/plain',
        'User-Agent': 'python3 ShipmentReport_3x.py v0.2',
    }
    # Build the urlencode parameters dict.  BUG FIX: split on the first '='
    # only, so values that themselves contain '=' survive intact.
    ueparams = dict(val.split('=', 1) for val in list_data)
    params = urlencode(ueparams)
    try:
        conn.request('GET', f'{URLBASE}{url}', params, headers=headers)
        res = conn.getresponse()
        print(res.status, res.reason)
        data = json.loads(res.read())
    except Exception:
        data = {}
    finally:
        # BUG FIX: the original leaked the HTTPS connection on every call.
        conn.close()
    return data
if __name__ == "__main__":
    # first look for the API key passed in from the command line
    if len(sys.argv) == 2:
        API_KEY = sys.argv[1]
        API_KEY = API_KEY.replace('"', '').replace("'", '')
    # otherwise, try to load it from the environment
    else:
        try:
            # attempt to read the key from the environment
            # N.B. needs to be a production key
            API_KEY = os.environ[ENV_VAR_API_KEY]
        except Exception:
            API_KEY = ''
    # Only the first five characters of the key are ever shown.
    print(f"Using API key: '{API_KEY[:5]}" + ("*" * (len(API_KEY) - 5)) + "'...")
    # retrieve the shipments in pages
    # on the first page, just use dates
    # each subsequent page, pass in the last seen shipment ID, to force the next page
    has_more = True
    shipments = []
    params = ['page_size=100', f'start_datetime={startDate}', f'end_datetime={endDate}']
    while has_more:
        data = getURL(API_KEY, 'shipments', params)
        if 'shipments' in data and len(data['shipments']) > 0:
            for s in data['shipments']:
                shipments.append(s)
            print(f'Shipments processed: {len(shipments)}')
            has_more = data['has_more']
            # Next page: everything before the oldest shipment seen so far.
            params = [
                'page_size=100',
                f'start_datetime={startDate}',
                f'end_datetime={endDate}',
                f'before_id={shipments[-1]["id"]}',
            ]
        else:
            # Empty page (or failed request, which returns {}): stop paging.
            has_more = False
    # build file name from the current timestamp, e.g. 20200102_123456.789012.csv
    n = str(datetime.now())
    n = n.replace('-', '').replace(' ', '_').replace(':', '')
    outfile = os.path.join(OUTPUT_FOLDER, (n + '.csv'))
    print(f"Creating file '{outfile}'...")
    # format all the returned shipment data
    rows = []
    for shipment in shipments:
        data = [
            shipment['created_at'],
            shipment['id'],
            shipment['reference'],
            shipment['mode'],
            shipment['to_address']['id'],
            shipment['from_address']['id'],
            shipment['return_address']['id'],
            shipment['buyer_address']['id'],
            shipment['parcel']['id'],
        ]
        # Optional sub-objects: blank when absent.
        data += [
            shipment['customs_info']['id'] if shipment['customs_info'] else '',
            shipment['scan_form']['id'] if shipment['scan_form'] else '',
        ]
        fees = {f['type']: f['amount'] for f in shipment['fees']}
        # Refunded shipments get their positive fees reported as negative.
        sign = '-' if str(shipment['refund_status']) == 'refunded' else ''
        data += [
            (sign if ('LabelFee' in fees and float(fees['LabelFee']) > 0.0) else '')
            + (fees['LabelFee'] if 'LabelFee' in fees else ''),  # noqa
            (sign if ('PostageFee' in fees and float(fees['PostageFee']) > 0.0) else '')
            + (fees['PostageFee'] if 'PostageFee' in fees else ''),
            (sign if ('InsuranceFee' in fees and float(fees['InsuranceFee']) > 0.0) else '')
            + (fees['InsuranceFee'] if 'InsuranceFee' in fees else ''),
        ]
        sr = shipment['selected_rate']
        data += [shipment['insurance'], sr['id'], sr['carrier'], sr['service'], sr['rate']]
        pl = shipment['postage_label']
        data += [pl['id'], pl['label_url']]
        data += [shipment['is_return'], shipment['tracking_code'], shipment['usps_zone'], shipment['status']]
        data += [
            shipment['tracker']['id'] if shipment['tracker'] else '',
            shipment['tracker']['public_url'] if shipment['tracker'] else '',
        ]
        data += [shipment['refund_status'], shipment['batch_id'], shipment['batch_status'], shipment['batch_message']]
        # Normalise all falsy values (None, False, 0, ...) to empty strings.
        data = [(i if i else '') for i in data]
        rows.append(data)
    # Column headers, in the same order the row values were appended above.
    cols = (
        'created_at',
        'id',
        'reference',
        'mode',
        'to_address.id',
        'from_address.id',
        'return_address.id',
        'buyer_address.id',
        'parcel.id',
        'customs_info.id',
        'scan_form.id',
        'label_fee',
        'postage_fee',
        'insurance_fee',
        'insured_value',
        'selected_rate.id',
        'selected_rate.carrier',
        'selected_rate.service',
        'selected_rate.rate',
        'postage_label.id',
        'postage_label.url',
        'is_return',
        'tracking_code',
        'usps_zone',
        'status',
        'tracker.id',
        'tracker.public_url',
        'refund_status',
        'batch_id',
        'batch_status',
        'batch_message',
    )
    # store data in a CSV
    with open(outfile, mode='w', encoding='utf-8', newline='\n') as f:
        writer = csv.writer(f, dialect='excel', quoting=csv.QUOTE_MINIMAL)
        writer.writerow(cols)
        writer.writerows(rows)
    print(f'Total number of shipments in file: {len(rows)}')
| [
"json.loads",
"datetime.datetime.utcnow",
"http.client.HTTPSConnection",
"csv.writer",
"os.path.join",
"datetime.datetime.now",
"urllib.parse.urlencode",
"os.path.expanduser"
] | [((1847, 1870), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (1865, 1870), False, 'import os\n'), ((2158, 2193), 'http.client.HTTPSConnection', 'HTTPSConnection', (['"""api.easypost.com"""'], {}), "('api.easypost.com')\n", (2173, 2193), False, 'from http.client import HTTPSConnection\n'), ((2737, 2756), 'urllib.parse.urlencode', 'urlencode', (['ueparams'], {}), '(ueparams)\n', (2746, 2756), False, 'from urllib.parse import urlencode\n'), ((4595, 4634), 'os.path.join', 'os.path.join', (['OUTPUT_FOLDER', "(n + '.csv')"], {}), "(OUTPUT_FOLDER, n + '.csv')\n", (4607, 4634), False, 'import os\n'), ((1964, 1981), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1979, 1981), False, 'from datetime import datetime\n'), ((2954, 2973), 'json.loads', 'json.loads', (['res_str'], {}), '(res_str)\n', (2964, 2973), False, 'import json\n'), ((4503, 4517), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4515, 4517), False, 'from datetime import datetime\n'), ((7569, 7626), 'csv.writer', 'csv.writer', (['f'], {'dialect': '"""excel"""', 'quoting': 'csv.QUOTE_MINIMAL'}), "(f, dialect='excel', quoting=csv.QUOTE_MINIMAL)\n", (7579, 7626), False, 'import csv\n')] |
# coding: utf-8
#! /usr/bin/env python
# FrequencyJumpLibrary
import numpy as np
from scipy import stats
import math as math
def KM(y, delta_t=1, Moments=[1, 2, 4, 6, 8], bandwidth=1.5,
       Lowerbound=False, Upperbound=False, Kernel='Epanechnikov'):
    """ Kernel-based regression of conditional moments of jump sizes.

    :param y: 1-D numpy array of observations of the process.
    :param delta_t: sampling interval used to scale the moments.
    :param Moments: moment orders to estimate; a 0th-order column is added
        internally for normalisation.  (The default list is never mutated.)
    :param bandwidth: kernel bandwidth; also sets the kernel support width.
    :param Lowerbound: lower edge of the evaluation grid, or False to use
        min(y).  Checked with ``is False`` so a legitimate bound of 0 works.
    :param Upperbound: upper edge of the grid, or False to use max(y).
    :param Kernel: kept for interface compatibility; only the Epanechnikov
        kernel is implemented, so this argument is currently ignored.
    :return: (res, space) where res[:, 0] holds the kernel-weighted counts,
        res[:, k] the k-th requested moment divided by k! * delta_t, and
        space is the evaluation grid of length n + Mn.
    """
    moments = [0] + list(Moments)
    length = len(moments)
    n = 5000
    Mn = int(n * bandwidth / 10)  # number of kernel support points ("minor n")
    res = np.zeros([n + Mn, length])
    # Epanechnikov kernel: 3/4 (1 - x^2) on x in [-1, 1], rescaled by bandwidth.
    kernel = (3 * (1 - (np.linspace(-bandwidth, bandwidth, Mn) / bandwidth) ** 2)) / (4 * bandwidth)
    yDist = y[1:] - y[:-1]  # one-step increments
    # BUG FIX: the original tested ``Lowerbound == False``, which is also
    # True for a numeric bound of 0 (0 == False); use identity checks so a
    # zero bound is honoured.  Same for Upperbound.
    Min = min(y) if Lowerbound is False else Lowerbound
    Max = max(y) if Upperbound is False else Upperbound
    space = np.linspace(Min, Max, n + Mn)
    # Map each starting point onto one of n histogram bins (assumes Max > Min).
    b = (((y[:-1] - Min) / abs(Max - Min)) * n).astype(int)
    trueb = np.unique(b[(b >= 0) * (b < n)])
    for i in trueb:
        r = yDist[b == i]
        for l in range(length):
            res[i:i + Mn, l] += kernel * np.sum(r ** moments[l])
    # Avoid division by zero for bins that received no data.
    res[:, 0][res[:, 0] == 0] = 1.
    for l in range(length - 1):
        res[:, l + 1] = np.divide(res[:, l + 1],
                                  res[:, 0] * math.factorial(moments[l + 1]) * delta_t)
    return res, space
| [
"math.factorial",
"numpy.zeros",
"numpy.linspace",
"numpy.unique"
] | [((437, 463), 'numpy.zeros', 'np.zeros', (['[n + Mn, length]'], {}), '([n + Mn, length])\n', (445, 463), True, 'import numpy as np\n'), ((943, 972), 'numpy.linspace', 'np.linspace', (['Min', 'Max', '(n + Mn)'], {}), '(Min, Max, n + Mn)\n', (954, 972), True, 'import numpy as np\n'), ((1049, 1081), 'numpy.unique', 'np.unique', (['b[(b >= 0) * (b < n)]'], {}), '(b[(b >= 0) * (b < n)])\n', (1058, 1081), True, 'import numpy as np\n'), ((1348, 1378), 'math.factorial', 'math.factorial', (['Moments[l + 1]'], {}), '(Moments[l + 1])\n', (1362, 1378), True, 'import math as math\n'), ((587, 633), 'numpy.linspace', 'np.linspace', (['(-1 * bandwidth)', '(1 * bandwidth)', 'Mn'], {}), '(-1 * bandwidth, 1 * bandwidth, Mn)\n', (598, 633), True, 'import numpy as np\n')] |
# coding: utf-8
"""
Accounting Extension
These APIs allow you to interact with HubSpot's Accounting Extension. It allows you to: * Specify the URLs that HubSpot will use when making webhook requests to your external accounting system. * Respond to webhook calls made to your external accounting system by HubSpot # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.extensions.accounting.configuration import Configuration
class CreateUserAccountRequestExternal(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                          and the value is attribute type.
      attribute_map (dict): The key is attribute name
                          and the value is json key in definition.
    """
    # Attribute name -> Python type, used by to_dict() to walk the model.
    openapi_types = {"account_id": "str", "account_name": "str", "currency_code": "str"}
    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        "account_id": "accountId",
        "account_name": "accountName",
        "currency_code": "currencyCode",
    }

    def __init__(
        self,
        account_id=None,
        account_name=None,
        currency_code=None,
        local_vars_configuration=None,
    ):  # noqa: E501
        """CreateUserAccountRequestExternal - a model defined in OpenAPI"""  # noqa: E501
        # Fall back to a default client configuration when none is supplied.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._account_id = None
        self._account_name = None
        self._currency_code = None
        self.discriminator = None

        # Assign through the property setters so validation runs.
        self.account_id = account_id
        self.account_name = account_name
        self.currency_code = currency_code

    @property
    def account_id(self):
        """Gets the account_id of this CreateUserAccountRequestExternal.  # noqa: E501

        The id of the account in your system.  # noqa: E501

        :return: The account_id of this CreateUserAccountRequestExternal.  # noqa: E501
        :rtype: str
        """
        return self._account_id

    @account_id.setter
    def account_id(self, account_id):
        """Sets the account_id of this CreateUserAccountRequestExternal.

        The id of the account in your system.  # noqa: E501

        :param account_id: The account_id of this CreateUserAccountRequestExternal.  # noqa: E501
        :type: str
        """
        if (
            self.local_vars_configuration.client_side_validation and account_id is None
        ):  # noqa: E501
            raise ValueError(
                "Invalid value for `account_id`, must not be `None`"
            )  # noqa: E501

        self._account_id = account_id

    @property
    def account_name(self):
        """Gets the account_name of this CreateUserAccountRequestExternal.  # noqa: E501

        The name of the account in your system. This is normally the name visible to your users.  # noqa: E501

        :return: The account_name of this CreateUserAccountRequestExternal.  # noqa: E501
        :rtype: str
        """
        return self._account_name

    @account_name.setter
    def account_name(self, account_name):
        """Sets the account_name of this CreateUserAccountRequestExternal.

        The name of the account in your system. This is normally the name visible to your users.  # noqa: E501

        :param account_name: The account_name of this CreateUserAccountRequestExternal.  # noqa: E501
        :type: str
        """
        if (
            self.local_vars_configuration.client_side_validation
            and account_name is None
        ):  # noqa: E501
            raise ValueError(
                "Invalid value for `account_name`, must not be `None`"
            )  # noqa: E501

        self._account_name = account_name

    @property
    def currency_code(self):
        """Gets the currency_code of this CreateUserAccountRequestExternal.  # noqa: E501

        The default currency that this account uses.  # noqa: E501

        :return: The currency_code of this CreateUserAccountRequestExternal.  # noqa: E501
        :rtype: str
        """
        return self._currency_code

    @currency_code.setter
    def currency_code(self, currency_code):
        """Sets the currency_code of this CreateUserAccountRequestExternal.

        The default currency that this account uses.  # noqa: E501

        :param currency_code: The currency_code of this CreateUserAccountRequestExternal.  # noqa: E501
        :type: str
        """
        if (
            self.local_vars_configuration.client_side_validation
            and currency_code is None
        ):  # noqa: E501
            raise ValueError(
                "Invalid value for `currency_code`, must not be `None`"
            )  # noqa: E501

        self._currency_code = currency_code

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(
                    map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
                )
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict")
                        else item,
                        value.items(),
                    )
                )
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateUserAccountRequestExternal):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, CreateUserAccountRequestExternal):
            return True

        return self.to_dict() != other.to_dict()
| [
"six.iteritems",
"hubspot.crm.extensions.accounting.configuration.Configuration"
] | [((5163, 5196), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (5176, 5196), False, 'import six\n'), ((1613, 1628), 'hubspot.crm.extensions.accounting.configuration.Configuration', 'Configuration', ([], {}), '()\n', (1626, 1628), False, 'from hubspot.crm.extensions.accounting.configuration import Configuration\n')] |
# -*- coding: utf-8 -*-
# @Time : 2020/4/2 22:55
# @Author : Nixin
# @Email : <EMAIL>
# @File : action_douyin.py
# @Software: PyCharm
from appium import webdriver
from time import sleep
import random
class Action():
    """Drives the Douyin Lite Android app through Appium: opens a session
    and endlessly swipes up to advance to the next video."""
    def __init__(self):
        # Initial configuration: set the Desired Capabilities parameters
        # (target device and app under test).
        self.desired_caps = {
            "platformName": "Android",
            "deviceName": "192.168.0.135:5555",
            "appPackage": "com.ss.android.ugc.aweme.lite",
            "appActivity": "com.ss.android.ugc.aweme.main.MainActivity",
            'newCommandTimeout': "36000",
            "noReset": True,
            "noSign": True
        }
        # Address of the local Appium server.
        self.server = 'http://localhost:4723/wd/hub'
        # Open a new session.
        self.driver = webdriver.Remote(self.server, self.desired_caps)
        print(self.driver.get_window_size())
        # Swipe geometry: start coordinates and swipe distance.
        self.x = self.driver.get_window_size()['width']
        self.y = self.driver.get_window_size()['height']
        self.start_x = 1/2*self.x
        self.start_y = 1/2*self.y
        self.distance = 120
    def comments(self):
        sleep(3)
        # Tap the screen once after the app starts to make sure the page shows.
        # self.driver.tap([(360, 604)], 500)
    def scroll(self):
        # Swipe indefinitely.
        while True:
            # Wait a random 3-10 seconds before the next swipe.
            r = random.choice(range(3, 11))
            print("%d秒后再滑屏:%d,%d,%d,%d" % (r, self.start_x, int(1 / 2 * self.y), self.start_x, int(1 / 6 * self.y)))
            sleep(r)
            # Simulate the upward swipe (screen centre up to the top sixth).
            self.driver.swipe(self.start_x, int(1/2*self.y), self.start_x, int(1/6*self.y), 300)
    def start(self):
        self.comments()
        self.scroll()
if __name__ == '__main__':
    # Build the Appium session and run the endless watch-and-swipe loop.
    action = Action()
    action.start()
    pass
| [
"time.sleep",
"appium.webdriver.Remote"
] | [((773, 821), 'appium.webdriver.Remote', 'webdriver.Remote', (['self.server', 'self.desired_caps'], {}), '(self.server, self.desired_caps)\n', (789, 821), False, 'from appium import webdriver\n'), ((1134, 1142), 'time.sleep', 'sleep', (['(3)'], {}), '(3)\n', (1139, 1142), False, 'from time import sleep\n'), ((1481, 1489), 'time.sleep', 'sleep', (['r'], {}), '(r)\n', (1486, 1489), False, 'from time import sleep\n')] |
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from my_web_project.common.forms import BootstrapFormMixin
from my_web_project.main.models import Student, Teacher
class StudentForm(BootstrapFormMixin,forms.ModelForm):
    """ModelForm for editing a Student profile, styled by BootstrapFormMixin."""
    class Meta:
        model = Student
        # 'user' and 'is_complete' are excluded, so they must be set in the view.
        # fields = '__all__'
        exclude = ('user','is_complete')
class TeacherForm(BootstrapFormMixin,forms.ModelForm):
    """ModelForm for editing a Teacher profile, styled by BootstrapFormMixin."""
    class Meta:
        model = Teacher
        # 'user' and 'is_complete' are excluded, so they must be set in the view.
        exclude = ('user', 'is_complete')
class LoginForm(BootstrapFormMixin,forms.Form):
user = None
username = forms.CharField(max_length=30, )
password = forms.CharField(
max_length=15,
widget=forms.PasswordInput(),
)
def clean(self):
self.user = authenticate(
username=self.cleaned_data['username'],
password=self.cleaned_data['password'],
)
if not self.user:
raise ValidationError('Incorrect username and/or passworord ')
def save(self):
return self.user
class MyUserCreationForm(BootstrapFormMixin,UserCreationForm):
pass
# class Meta:
# model = User
# fields = ("username","is_staff",)
| [
"django.contrib.auth.authenticate",
"django.core.exceptions.ValidationError",
"django.forms.PasswordInput",
"django.forms.CharField"
] | [((723, 753), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (738, 753), False, 'from django import forms\n'), ((899, 996), 'django.contrib.auth.authenticate', 'authenticate', ([], {'username': "self.cleaned_data['username']", 'password': "self.cleaned_data['password']"}), "(username=self.cleaned_data['username'], password=self.\n cleaned_data['password'])\n", (911, 996), False, 'from django.contrib.auth import authenticate\n'), ((827, 848), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {}), '()\n', (846, 848), False, 'from django import forms\n'), ((1072, 1128), 'django.core.exceptions.ValidationError', 'ValidationError', (['"""Incorrect username and/or passworord """'], {}), "('Incorrect username and/or passworord ')\n", (1087, 1128), False, 'from django.core.exceptions import ValidationError\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 10:58:27 2020
Experiments where one marginal is fixed
"""
import os
import numpy as np
from joblib import Parallel, delayed
import torch
import ot
from unbalancedgw.batch_stable_ugw_solver import log_batch_ugw_sinkhorn
from unbalancedgw._batch_utils import compute_batch_flb_plan
import utils
from partial_gw import compute_cost_matrices
folder = "marginals_without_rescaling"
path = os.getcwd() + "/saved_plans"
if not os.path.isdir(path):
os.mkdir(path)
path = path + "/" + folder
if not os.path.isdir(path):
os.mkdir(path)
def euclid_dist(x, y):
"""
Computes the euclidean distance between two pointclouds, returning a
matrix whose coordinates are the distance between two points.
Parameters
----------
x: torch.Tensor of size [size_X, dim]
coordinates of the first group of vectors of R^d.
y: torch.Tensor of size [size_Y, dim]
coordinates of the second group of vectors of R^d.
Returns
-------
torch.Tensor of size [size_X, size_Y]
Matrix of all pairwise distances.
"""
return (x[:, None, :] - y[None, :, :]).norm(p=2, dim=2)
def prepare_initialisation(dataset_p, dataset_u, n_pos, n_unl, prior, nb_try):
"""
Compute the tensor used as initialization for UGW.
The init is obtained by solving partial EMD as in Chapel et al. when the
domains are the same.
Parameters
----------
dataset_p: string
name of the dataset used for positive data
dataset_u: string
name of the dataset used for unlabeled data
n_pos: int
number of positives samples
n_unl: int
number of unlabeled samples
prior: float
proportion of positive samples in the unlabeled dataset
nb_try: int
number of folds to perform PU learning
Returns
-------
init_plan: torch.Tensor of size [nb_try, n_pos, n_unl]
Set of initialization plans used to init UGW.
"""
init_plan = torch.zeros([nb_try, n_pos, n_unl])
for i in range(nb_try):
# Draw dataset
P, U, _ = utils.draw_p_u_dataset_scar(dataset_p, dataset_u, n_pos,
n_unl, prior, seed_nb=i)
Ctot, C1, C2, mu, nu = compute_cost_matrices(P, U, prior,
nb_dummies=10)
# Compute init
init_plan[i] = torch.tensor(ot.emd(mu, nu, Ctot)[:n_pos, :])
return init_plan
def compute_plan_ugw(dataset_p, dataset_u, n_pos, n_unl, prior, eps, rho, rho2,
nb_try, device=0):
# Set default type and GPU device
torch.cuda.set_device(device)
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# keep constant to normalize cost, uniform over folds by taking first batch
# P, U, _ = utils.draw_p_u_dataset_scar(dataset_p, dataset_u, n_pos, n_unl,
# prior, 0)
# U = torch.tensor(U.values,dtype=torch.float) # Convert to torch
# cst_norm = euclid_dist(U, U).max()
# Draw cost for all seeds as batch
Cx = torch.zeros([nb_try, n_pos, n_pos])
Cy = torch.zeros([nb_try, n_unl, n_unl])
for i in range(nb_try):
P, U, y_u = utils.draw_p_u_dataset_scar(dataset_p, dataset_u, n_pos,
n_unl, prior, seed_nb=i)
P, U = torch.tensor(P.values, dtype=torch.float), \
torch.tensor(U.values, dtype=torch.float)
cx, cy = euclid_dist(P, P), euclid_dist(U, U)
Cx[i], Cy[i] = cx, cy
# Cx[i], Cy[i] = cx / cst_norm, cy / cst_norm
del cx, cy
# Compute init and weights
mu = (torch.ones([n_pos]) / n_pos).expand(nb_try, -1)
nu = (torch.ones([n_unl]) / n_unl).expand(nb_try, -1)
if P.shape[1] == U.shape[1]: # If domains are the same
init_plan = prepare_initialisation(dataset_p, dataset_u, n_pos, n_unl,
prior, nb_try)
else:
_, _, init_plan = compute_batch_flb_plan(
mu, Cx, nu, Cy, eps=eps, rho=rho, rho2=rho2,
nits_sinkhorn=50000, tol_sinkhorn=1e-5)
# Compute the marginal of init and save as file
pi_numpy = init_plan.sum(dim=1).cpu().data.numpy()
fname = f'/ugw_init_{dataset_p}_{n_pos}_{dataset_u}_{n_unl}_' \
f'prior{prior}_eps{eps}_rho{rho}_rho{rho2}_reps{nb_try}.npy'
np.save(path + fname, pi_numpy)
# Set params and start the grid wrt entropic param eps
pi = log_batch_ugw_sinkhorn(mu, Cx, nu, Cy, init=init_plan,
eps=eps, rho=rho, rho2=rho2,
nits_plan=3000, tol_plan=1e-5,
nits_sinkhorn=3000, tol_sinkhorn=1e-6)
if torch.any(torch.isnan(pi)):
raise Exception(f"Solver got NaN plan with params (eps, rho) = "
f"{dataset_p, dataset_u, nb_try, eps, rho, rho2}")
# Compute the marginal and save as file
pi_numpy = pi.sum(dim=1).cpu().data.numpy()
fname = f'/ugw_plan_{dataset_p}_{n_pos}_{dataset_u}_{n_unl}_' \
f'prior{prior}_eps{eps}_rho{rho}_rho{rho2}_reps{nb_try}.npy'
np.save(path + fname, pi_numpy)
print(
f"DONE = Dataset {dataset_p, dataset_u}, eps = {eps}, "
f"rho = {rho, rho2}, reps = {nb_try}")
return
if __name__ == '__main__':
parallel_gpu = True
# epsilon Set to 2**-9 but an be optimized via grid-search
grid_eps = [2. ** k for k in range(-9, -8, 1)]
grid_rho = [2. ** k for k in range(-10, -4, 1)]
nb_try = 40
# List all tasks for the Caltech datasets
list_tasks = []
# # Matching similar features - prior set to 10%
n_pos, n_unl, prior = 100, 100, 0.1
list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam', 'surf_dslr']
list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam',
'decaf_dslr']
list_data = [('surf_Caltech', d) for d in list_surf] + [
('decaf_caltech', d) for d in list_decaf]
list_tasks = list_tasks + [
(data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)
for (data_pos, data_unl) in list_data for eps in grid_eps
for rho in grid_rho for rho2 in grid_rho]
# # Matching similar features - prior set to 20%
n_pos, n_unl, prior = 100, 100, 0.2
list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam']
list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam']
list_data = [('surf_Caltech', d) for d in list_surf] + [
('decaf_caltech', d) for d in list_decaf]
list_tasks = list_tasks + [
(data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)
for (data_pos, data_unl) in list_data for eps in grid_eps
for rho in grid_rho for rho2 in grid_rho]
# Matching different features - prior set to 10%
n_pos, n_unl, prior = 100, 100, 0.1
list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam', 'surf_dslr']
list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam',
'decaf_dslr']
list_data = [('surf_Caltech', d) for d in list_decaf] + [
('decaf_caltech', d) for d in list_surf]
list_tasks = list_tasks + [
(data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)
for (data_pos, data_unl) in list_data for eps in grid_eps
for rho in grid_rho for rho2 in grid_rho]
# # Matching different features - prior set to 20%
n_pos, n_unl, prior = 100, 100, 0.2
list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam']
list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam']
list_data = [('surf_Caltech', d) for d in list_decaf] + [
('decaf_caltech', d) for d in list_surf]
list_tasks = list_tasks + [
(data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)
for (data_pos, data_unl) in list_data for eps in grid_eps
for rho in grid_rho for rho2 in grid_rho]
if parallel_gpu:
assert torch.cuda.is_available()
list_device = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
total_devices = torch.cuda.device_count()
print(
f"Parallel computation // Total GPUs available = {total_devices}")
pll = Parallel(n_jobs=total_devices)
iterator = (
delayed(compute_plan_ugw)(data_pos, data_unl, n_pos, n_unl, prior,
eps, rho, rho2, nb_try,
device=list_device[k % total_devices])
for
k, (
data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2,
nb_try) in
enumerate(list_tasks))
pll(iterator)
else:
print("Not Parallel")
for (data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2,
nb_try) in list_tasks:
compute_plan_ugw(data_pos, data_unl, n_pos, n_unl, prior, eps, rho,
rho2, nb_try)
print(f'{data_pos, data_unl} done.')
| [
"unbalancedgw._batch_utils.compute_batch_flb_plan",
"unbalancedgw.batch_stable_ugw_solver.log_batch_ugw_sinkhorn",
"torch.cuda.device_count",
"torch.cuda.is_available",
"numpy.save",
"utils.draw_p_u_dataset_scar",
"torch.set_default_tensor_type",
"partial_gw.compute_cost_matrices",
"os.path.isdir",
... | [((461, 472), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (470, 472), False, 'import os\n'), ((497, 516), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (510, 516), False, 'import os\n'), ((522, 536), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (530, 536), False, 'import os\n'), ((571, 590), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (584, 590), False, 'import os\n'), ((596, 610), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (604, 610), False, 'import os\n'), ((1993, 2028), 'torch.zeros', 'torch.zeros', (['[nb_try, n_pos, n_unl]'], {}), '([nb_try, n_pos, n_unl])\n', (2004, 2028), False, 'import torch\n'), ((2637, 2666), 'torch.cuda.set_device', 'torch.cuda.set_device', (['device'], {}), '(device)\n', (2658, 2666), False, 'import torch\n'), ((2671, 2726), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['"""torch.cuda.FloatTensor"""'], {}), "('torch.cuda.FloatTensor')\n", (2700, 2726), False, 'import torch\n'), ((3103, 3138), 'torch.zeros', 'torch.zeros', (['[nb_try, n_pos, n_pos]'], {}), '([nb_try, n_pos, n_pos])\n', (3114, 3138), False, 'import torch\n'), ((3148, 3183), 'torch.zeros', 'torch.zeros', (['[nb_try, n_unl, n_unl]'], {}), '([nb_try, n_unl, n_unl])\n', (3159, 3183), False, 'import torch\n'), ((4399, 4430), 'numpy.save', 'np.save', (['(path + fname)', 'pi_numpy'], {}), '(path + fname, pi_numpy)\n', (4406, 4430), True, 'import numpy as np\n'), ((4500, 4663), 'unbalancedgw.batch_stable_ugw_solver.log_batch_ugw_sinkhorn', 'log_batch_ugw_sinkhorn', (['mu', 'Cx', 'nu', 'Cy'], {'init': 'init_plan', 'eps': 'eps', 'rho': 'rho', 'rho2': 'rho2', 'nits_plan': '(3000)', 'tol_plan': '(1e-05)', 'nits_sinkhorn': '(3000)', 'tol_sinkhorn': '(1e-06)'}), '(mu, Cx, nu, Cy, init=init_plan, eps=eps, rho=rho,\n rho2=rho2, nits_plan=3000, tol_plan=1e-05, nits_sinkhorn=3000,\n tol_sinkhorn=1e-06)\n', (4522, 4663), False, 'from unbalancedgw.batch_stable_ugw_solver import log_batch_ugw_sinkhorn\n'), ((5171, 
5202), 'numpy.save', 'np.save', (['(path + fname)', 'pi_numpy'], {}), '(path + fname, pi_numpy)\n', (5178, 5202), True, 'import numpy as np\n'), ((2098, 2183), 'utils.draw_p_u_dataset_scar', 'utils.draw_p_u_dataset_scar', (['dataset_p', 'dataset_u', 'n_pos', 'n_unl', 'prior'], {'seed_nb': 'i'}), '(dataset_p, dataset_u, n_pos, n_unl, prior,\n seed_nb=i)\n', (2125, 2183), False, 'import utils\n'), ((2257, 2306), 'partial_gw.compute_cost_matrices', 'compute_cost_matrices', (['P', 'U', 'prior'], {'nb_dummies': '(10)'}), '(P, U, prior, nb_dummies=10)\n', (2278, 2306), False, 'from partial_gw import compute_cost_matrices\n'), ((3232, 3317), 'utils.draw_p_u_dataset_scar', 'utils.draw_p_u_dataset_scar', (['dataset_p', 'dataset_u', 'n_pos', 'n_unl', 'prior'], {'seed_nb': 'i'}), '(dataset_p, dataset_u, n_pos, n_unl, prior,\n seed_nb=i)\n', (3259, 3317), False, 'import utils\n'), ((4013, 4125), 'unbalancedgw._batch_utils.compute_batch_flb_plan', 'compute_batch_flb_plan', (['mu', 'Cx', 'nu', 'Cy'], {'eps': 'eps', 'rho': 'rho', 'rho2': 'rho2', 'nits_sinkhorn': '(50000)', 'tol_sinkhorn': '(1e-05)'}), '(mu, Cx, nu, Cy, eps=eps, rho=rho, rho2=rho2,\n nits_sinkhorn=50000, tol_sinkhorn=1e-05)\n', (4035, 4125), False, 'from unbalancedgw._batch_utils import compute_batch_flb_plan\n'), ((4767, 4782), 'torch.isnan', 'torch.isnan', (['pi'], {}), '(pi)\n', (4778, 4782), False, 'import torch\n'), ((7996, 8021), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8019, 8021), False, 'import torch\n'), ((8111, 8136), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (8134, 8136), False, 'import torch\n'), ((8245, 8275), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'total_devices'}), '(n_jobs=total_devices)\n', (8253, 8275), False, 'from joblib import Parallel, delayed\n'), ((3377, 3418), 'torch.tensor', 'torch.tensor', (['P.values'], {'dtype': 'torch.float'}), '(P.values, dtype=torch.float)\n', (3389, 3418), False, 'import torch\n'), ((3437, 
3478), 'torch.tensor', 'torch.tensor', (['U.values'], {'dtype': 'torch.float'}), '(U.values, dtype=torch.float)\n', (3449, 3478), False, 'import torch\n'), ((2419, 2439), 'ot.emd', 'ot.emd', (['mu', 'nu', 'Ctot'], {}), '(mu, nu, Ctot)\n', (2425, 2439), False, 'import ot\n'), ((3674, 3693), 'torch.ones', 'torch.ones', (['[n_pos]'], {}), '([n_pos])\n', (3684, 3693), False, 'import torch\n'), ((3732, 3751), 'torch.ones', 'torch.ones', (['[n_unl]'], {}), '([n_unl])\n', (3742, 3751), False, 'import torch\n'), ((8309, 8334), 'joblib.delayed', 'delayed', (['compute_plan_ugw'], {}), '(compute_plan_ugw)\n', (8316, 8334), False, 'from joblib import Parallel, delayed\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 1 18:44:04 2018
@author: JavaWizards
"""
import numpy as np
file = "/Users/nuno_chicoria/Downloads/b_should_be_easy.in"
handle = open(file)
R, C, F, N, B, T = handle.readline().split()
rides = []
index = []
for i in range(int(N)):
index.append(i)
for line in handle:
rides.append(line.split())
rides_np = np.asarray(rides)
rides_np = np.column_stack([rides_np, index])
rides_np = rides_np.astype(np.int)
rides_np = rides_np[rides_np[:,5].argsort()]
vehicles = {}
for i in range(int(F)):
vehicles [i] = ["A", [0, 0], [0, 0], [0, 0], []]
for i in range(int(T)):
rides_np = rides_np[rides_np[:,5] > i]
for item in range(len(vehicles)):
if vehicles[item][0] == "A":
if rides_np.size != 0:
if abs(vehicles[item][1][0] - rides_np[0, 0]) + abs(vehicles[item][1][1] - rides_np[0, 1]) + i >= rides_np[0, 4]:
if abs(vehicles[item][1][0] - rides_np[0, 0]) + abs(vehicles[item][1][1] - rides_np[0, 1]) + i + abs(rides_np[0,0] - rides_np[0,2]) + abs(rides_np[0,1] - rides_np[0,3]) <= rides_np[0, 5]:
vehicles[item][0] = "C"
vehicles[item][2] = [rides_np[0, 0], rides_np[0, 1]]
vehicles[item][3] = [rides_np[0, 2], rides_np[0, 3]]
vehicles[item][4].append(rides_np[0, 6])
rides_np = np.delete(rides_np, (0), axis=0)
else:
rides_np = np.delete(rides_np, (0), axis=0)
for item in range(len(vehicles)):
if vehicles[item][0] == "C":
if vehicles[item][1][0] < vehicles[item][2][0]:
vehicles[item][1][0] = vehicles[item][1][0] + 1
elif vehicles[item][1][0] > vehicles[item][2][0]:
vehicles[item][1][0] = vehicles[item][1][0] - 1
elif vehicles[item][1][0] == vehicles[item][2][0]:
if vehicles[item][1][1] < vehicles[item][2][1]:
vehicles[item][1][1] = vehicles[item][1][1] + 1
elif vehicles[item][1][1] > vehicles[item][2][1]:
vehicles[item][1][1] = vehicles[item][1][1] - 1
else:
vehicles[item][0] = "D"
for item in range(len(vehicles)):
if vehicles[item][0] == "D":
if vehicles[item][1][0] < vehicles[item][3][0]:
vehicles[item][1][0] += 1
elif vehicles[item][1][0] > vehicles[item][3][0]:
vehicles[item][1][0] -= 1
elif vehicles[item][1][0] == vehicles[item][3][0]:
if vehicles[item][1][1] < vehicles[item][3][1]:
vehicles[item][1][1] += 1
elif vehicles[item][1][1] > vehicles[item][3][1]:
vehicles[item][1][1] -= 1
else:
vehicles[item][0] = "A"
vehicles[item][2] = None
vehicles[item][3] = None
results = open("ghc2018.txt", "w+")
for item in range(len(vehicles)):
if len(vehicles[item][4]) !=0:
results.write(str(len(vehicles[item][4])))
for ride in vehicles[item][4]:
results.write(" ")
results.write(str(ride))
results.write("\n")
results.close()
| [
"numpy.delete",
"numpy.asarray",
"numpy.column_stack"
] | [((397, 414), 'numpy.asarray', 'np.asarray', (['rides'], {}), '(rides)\n', (407, 414), True, 'import numpy as np\n'), ((426, 460), 'numpy.column_stack', 'np.column_stack', (['[rides_np, index]'], {}), '([rides_np, index])\n', (441, 460), True, 'import numpy as np\n'), ((1455, 1485), 'numpy.delete', 'np.delete', (['rides_np', '(0)'], {'axis': '(0)'}), '(rides_np, 0, axis=0)\n', (1464, 1485), True, 'import numpy as np\n'), ((1549, 1579), 'numpy.delete', 'np.delete', (['rides_np', '(0)'], {'axis': '(0)'}), '(rides_np, 0, axis=0)\n', (1558, 1579), True, 'import numpy as np\n')] |
import arcade
from utils.loader import Loader
class keyFlags():
def __init__(self):
self.left = False
self.right = False
self.up = False
self.down = False
self.space = False
TITLE = "Raiden Py"
WINDOW = None
WIDTH = 600
HEIGHT = 600
SCREEN_WIDTH = WIDTH
SCREEN_HEIGHT = HEIGHT
bullets = []
enemies = []
l = Loader()
print("load Start")
l.load()
print("load End")
enemyBullets = arcade.SpriteList()
playerBullets = arcade.SpriteList()
enemies = arcade.SpriteList()
explosions = arcade.SpriteList()
playerKills = 0
def getPlayerKills():
return playerKills
def addOneToPlayerKills():
global playerKills
playerKills += 1
| [
"arcade.SpriteList",
"utils.loader.Loader"
] | [((358, 366), 'utils.loader.Loader', 'Loader', ([], {}), '()\n', (364, 366), False, 'from utils.loader import Loader\n'), ((430, 449), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (447, 449), False, 'import arcade\n'), ((466, 485), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (483, 485), False, 'import arcade\n'), ((496, 515), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (513, 515), False, 'import arcade\n'), ((529, 548), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (546, 548), False, 'import arcade\n')] |
from tnparser.pipeline import read_pipelines, Pipeline
text1="I have a dog! Let's see what I can do with Silo.ai. :) Can I tokenize it? I think so! Heading: This is the heading And here continues a new sentence and there's no dot."
text2="Some other text, to see we can tokenize more stuff without reloading the model... :)"
# What do we have for English in models_en_ewt?
available_pipelines=read_pipelines("models_en_ewt/pipelines.yaml") # {pipeline_name -> its steps}
p=Pipeline(available_pipelines["tokenize"]) # launch the pipeline from the steps
for _ in range(1000):
print(p.parse(text1))
print(p.parse(text2))
| [
"tnparser.pipeline.Pipeline",
"tnparser.pipeline.read_pipelines"
] | [((396, 442), 'tnparser.pipeline.read_pipelines', 'read_pipelines', (['"""models_en_ewt/pipelines.yaml"""'], {}), "('models_en_ewt/pipelines.yaml')\n", (410, 442), False, 'from tnparser.pipeline import read_pipelines, Pipeline\n'), ((490, 531), 'tnparser.pipeline.Pipeline', 'Pipeline', (["available_pipelines['tokenize']"], {}), "(available_pipelines['tokenize'])\n", (498, 531), False, 'from tnparser.pipeline import read_pipelines, Pipeline\n')] |
# -*- coding: utf-8 -*-
"""A rate network for neutral hydrogen following
Katz, Weinberg & Hernquist 1996, eq. 28-32."""
import os.path
import math
import numpy as np
import scipy.interpolate as interp
import scipy.optimize
class RateNetwork(object):
    """A rate network for neutral hydrogen following
    Katz, Weinberg & Hernquist 1996, astro-ph/9509107, eq. 28-32.
    Most internal methods are CamelCapitalized and follow a convention that
    they are named like the process and then the ion they refer to.
    eg:
        CollisionalExciteHe0 is the neutral Helium collisional excitation rate.
        RecombHp is the recombination rate for ionized hydrogen.
    Externally useful methods (the API) are named like get_*.
    These are:
        get_temp() - gets the temperature from the density and internal energy.
        get_cooling_rate() - gets the total cooling rate from density and internal energy.
        get_neutral_fraction() - gets the neutral fraction from the rate network given density and internal energy.
    Two useful helper functions:
        get_equilib_ne() - gets the equilibrium electron density.
        get_ne_by_nh() - gets the above, divided by the hydrogen density (Gadget reports this as ElectronAbundance).
    Constructor arguments:
        redshift - the redshift at which to evaluate the cooling. Affects the photoionization rate,
                    the Inverse Compton cooling and the self shielding threshold.
        photo_factor - Factor by which to multiply the UVB amplitude.
        f_bar - Baryon fraction. Omega_b / Omega_cdm.
        converge - Tolerance to which the rate network should be converged.
        selfshield - Flag to enable self-shielding following Rahmati 2013
        cool - which cooling rate coefficient table to use.
               Supported are: KWH (original Gadget rates)
                              Nyx (rates used in Nyx (Lukic 2015))
                              Sherwood (rates used in Sherwood simulations (Bolton 2017))
               Default is Sherwood
        recomb - which recombination rate table to use.
                 Supported are: C92 (Cen 1992, the Gadget default)
                                V96 (Verner & Ferland 1996, more accurate rates).
                                B06 (Badnell 2006 rates, current cloudy defaults. Very similar to V96).
        collisional - Flag to enable collisional ionizations.
        treecool_file - File to read a UV background from. Matches format used by Gadget.
    """
    def __init__(self,redshift, photo_factor = 1., f_bar = 0.17, converge = 1e-7, selfshield=True, cool="Sherwood", recomb="V96", collisional=True, treecool_file="data/TREECOOL_ep_2018p"):
        #Select recombination/collisional-ionisation coefficient table.
        if recomb == "V96":
            self.recomb = RecombRatesVerner96()
        elif recomb == "B06":
            self.recomb = RecombRatesBadnell()
        else:
            self.recomb = RecombRatesCen92()
        self.photo = PhotoRates(treecool_file=treecool_file)
        self.photo_factor = photo_factor
        self.f_bar = f_bar
        #Select cooling coefficient table.
        if cool == "KWH":
            self.cool = CoolingRatesKWH92()
        elif cool == "Sherwood":
            self.cool = CoolingRatesSherwood()
        elif cool == "Nyx":
            self.cool = CoolingRatesNyx()
        else:
            raise ValueError("Not supported")
        #Extra helium reionization photoheating model
        #hub: Hubble parameter (h). he_thresh: overdensity above which the extra
        #heating saturates. he_amp/he_exp: amplitude and power-law slope of the
        #density-dependent factor applied in _he_reion_factor. Off by default.
        self.hub = 0.7
        self.he_thresh = 10
        self.he_amp = 1
        self.he_exp = 0
        self.he_model_on = False
        #proton mass in g
        self.protonmass = 1.67262178e-24
        self.redshift = redshift
        self.converge = converge
        self.selfshield = selfshield
        self.collisional = collisional
        #Redshifts at which the UVB gray opacity below is tabulated.
        zz = [0, 1, 2, 3, 4, 5, 6, 7, 8]
        #Tables for the self-shielding correction. Note these are not well-measured for z > 5!
        #Gray opacity in cm^2, consumed by _self_shield_dens.
        gray_opac = [2.59e-18,2.37e-18,2.27e-18, 2.15e-18, 2.02e-18, 1.94e-18, 1.82e-18, 1.71e-18, 1.60e-18]
        self.Gray_ss = interp.InterpolatedUnivariateSpline(zz, gray_opac)

    def get_temp(self, density, ienergy, helium=0.24):
        """Get the equilibrium temperature at given internal energy.
        density is gas density in protons/cm^3
        Internal energy is in J/kg == 10^-10 ergs/g.
        helium is a mass fraction"""
        #First solve for the equilibrium electron density, then invert u(T).
        ne = self.get_equilib_ne(density, ienergy, helium)
        nh = density * (1-helium)
        return self._get_temp(ne/nh, ienergy, helium)

    def get_cooling_rate(self, density, ienergy, helium=0.24, photoheating=False):
        """Get the total cooling rate for a temperature and density. Negative means heating."""
        ne = self.get_equilib_ne(density, ienergy, helium)
        nh = density * (1-helium)
        temp = self._get_temp(ne/nh, ienergy, helium)
        #Equilibrium number densities of each ionic species.
        nH0 = self._nH0(nh, temp, ne)
        nHe0 = self._nHe0(nh, temp, ne)
        nHp = self._nHp(nh, temp, ne)
        nHep = self._nHep(nh, temp, ne)
        nHepp = self._nHepp(nh, temp, ne)
        #This is the collisional excitation and ionisation rate.
        LambdaCollis = ne * (self.cool.CollisionalH0(temp) * nH0 +
                             self.cool.CollisionalHe0(temp) * nHe0 +
                             self.cool.CollisionalHeP(temp) * nHep)
        #Recombination cooling.
        LambdaRecomb = ne * (self.cool.RecombHp(temp) * nHp +
                             self.cool.RecombHeP(temp) * nHep +
                             self.cool.RecombHePP(temp) * nHepp)
        #Free-free (bremsstrahlung) cooling: singly-charged ions together, He++ separately.
        LambdaFF = ne * (self.cool.FreeFree(temp, 1)*(nHp + nHep) + self.cool.FreeFree(temp, 2)*nHepp)
        #Inverse Compton scattering off the CMB; redshift dependent.
        LambdaCmptn = ne * self.cool.InverseCompton(temp, self.redshift)
        Lambda = LambdaCollis + LambdaRecomb + LambdaFF + LambdaCmptn
        Heating = 0
        if photoheating:
            #UVB photoheating per neutral/singly-ionised species, scaled by photo_factor.
            Heating = nH0 * self.photo.epsH0(self.redshift)
            Heating += nHe0 * self.photo.epsHe0(self.redshift)
            Heating += nHep * self.photo.epsHep(self.redshift)
            Heating *= self.photo_factor
            if self.he_model_on:
                Heating *= self._he_reion_factor(density)
        return Lambda - Heating

    def get_equilib_ne(self, density, ienergy,helium=0.24):
        """Solve the system of equations for photo-ionisation equilibrium,
        starting with ne = nH and continuing until convergence.
        density is gas density in protons/cm^3
        Internal energy is in J/kg == 10^-10 ergs/g.
        helium is a mass fraction.
        """
        #Get hydrogen number density
        nh = density * (1-helium)
        #Self-consistency condition: ne must reproduce itself through the
        #temperature it implies. Seeded with ne = nH (fully ionised hydrogen).
        rooted = lambda ne: self._ne(nh, self._get_temp(ne/nh, ienergy, helium=helium), ne, helium=helium)
        ne = scipy.optimize.fixed_point(rooted, nh,xtol=self.converge)
        assert np.all(np.abs(rooted(ne) - ne) < self.converge)
        return ne

    def get_ne_by_nh(self, density, ienergy, helium=0.24):
        """Same as above, but get electrons per proton."""
        return self.get_equilib_ne(density, ienergy, helium)/(density*(1-helium))

    def get_neutral_fraction(self, density, ienergy, helium=0.24):
        """Get the neutral hydrogen fraction at a given temperature and density.
        density is gas density in protons/cm^3
        Internal energy is in J/kg == 10^-10 ergs/g.
        helium is a mass fraction.
        """
        ne = self.get_equilib_ne(density, ienergy, helium=helium)
        nh = density * (1-helium)
        temp = self._get_temp(ne/nh, ienergy, helium)
        return self._nH0(nh, temp, ne) / nh

    def _nH0(self, nh, temp, ne):
        """The neutral hydrogen number density. Eq. 33 of KWH."""
        alphaHp = self.recomb.alphaHp(temp)
        GammaeH0 = self.collisional * self.recomb.GammaeH0(temp)
        #Photoionisation rate per electron, attenuated by the self-shielding correction.
        photorate = self.photo.gH0(self.redshift)/ne*self.photo_factor*self._self_shield_corr(nh, temp)
        return nh * alphaHp/ (alphaHp + GammaeH0 + photorate)

    def _nHp(self, nh, temp, ne):
        """The ionised hydrogen number density. Eq. 34 of KWH."""
        return nh - self._nH0(nh, temp, ne)

    def _nHep(self, nh, temp, ne):
        """The ionised helium number density, divided by the helium number fraction. Eq. 35 of KWH."""
        alphaHep = self.recomb.alphaHep(temp) + self.recomb.alphad(temp)
        alphaHepp = self.recomb.alphaHepp(temp)
        photofac = self.photo_factor*self._self_shield_corr(nh, temp)
        GammaHe0 = self.collisional * self.recomb.GammaeHe0(temp) + self.photo.gHe0(self.redshift)/ne*photofac
        GammaHep = self.collisional * self.recomb.GammaeHep(temp) + self.photo.gHep(self.redshift)/ne*photofac
        return nh / (1 + alphaHep / GammaHe0 + GammaHep/alphaHepp)

    def _nHe0(self, nh, temp, ne):
        """The neutral helium number density, divided by the helium number fraction. Eq. 36 of KWH."""
        alphaHep = self.recomb.alphaHep(temp) + self.recomb.alphad(temp)
        photofac = self.photo_factor*self._self_shield_corr(nh, temp)
        GammaHe0 = self.collisional * self.recomb.GammaeHe0(temp) + self.photo.gHe0(self.redshift)/ne*photofac
        return self._nHep(nh, temp, ne) * alphaHep / GammaHe0

    def _nHepp(self, nh, temp, ne):
        """The doubly ionised helium number density, divided by the helium number fraction. Eq. 37 of KWH."""
        photofac = self.photo_factor*self._self_shield_corr(nh, temp)
        GammaHep = self.collisional * self.recomb.GammaeHep(temp) + self.photo.gHep(self.redshift)/ne*photofac
        alphaHepp = self.recomb.alphaHepp(temp)
        return self._nHep(nh, temp, ne) * GammaHep / alphaHepp

    def _ne(self, nh, temp, ne, helium=0.24):
        """The electron number density. Eq. 38 of KWH."""
        #yy is the helium number fraction relative to hydrogen.
        yy = helium / 4 / (1 - helium)
        return self._nHp(nh, temp, ne) + yy * self._nHep(nh, temp, ne) + 2* yy * self._nHepp(nh, temp, ne)

    def _self_shield_corr(self, nh, temp):
        """Photoionisation rate as a function of density from Rahmati 2012, eq. 14.
        Calculates Gamma_{Phot} / Gamma_{UVB}.
        Inputs: hydrogen density, temperature
            n_H
        The coefficients are their best-fit from appendix A."""
        if not self.selfshield:
            #No attenuation: correction factor of 1 everywhere.
            return np.ones_like(nh)
        nSSh = 1.003*self._self_shield_dens(self.redshift, temp)
        return 0.98*(1+(nh/nSSh)**1.64)**-2.28+0.02*(1+nh/nSSh)**-0.84

    def _self_shield_dens(self,redshift, temp):
        """Calculate the critical self-shielding density. Rahmati 202 eq. 13.
        gray_opac is a parameter of the UVB used.
        gray_opac is in cm^2 (2.49e-18 is HM01 at z=3)
        temp is particle temperature in K
        f_bar is the baryon fraction. 0.17 is roughly 0.045/0.265
        Returns density in atoms/cm^3"""
        T4 = temp/1e4
        #Photoionisation rate in units of 1e-12 s^-1.
        G12 = self.photo.gH0(redshift)/1e-12
        return 6.73e-3 * (self.Gray_ss(redshift) / 2.49e-18)**(-2./3)*(T4)**0.17*(G12)**(2./3)*(self.f_bar/0.17)**(-1./3)

    def _he_reion_factor(self, density):
        """Compute a density dependent correction factor to the heating rate which can model the effect of helium reionization.
        Argument: Gas density in protons/cm^3.
        NOTE(review): the clamp below uses a bare `if` on the overdensity, so
        this method assumes a scalar density; an array input would raise."""
        #Newton's constant (cgs units)
        gravity = 6.672e-8
        #100 km/s/Mpc in h/sec
        hubble = 3.2407789e-18
        omegab = 0.0483
        atime = 1/(1+self.redshift)
        #Critical density today (cgs); overden is the baryon overdensity.
        rhoc = 3 * (self.hub* hubble)**2 /(8* math.pi * gravity)
        overden = self.protonmass * density /(omegab * rhoc * atime**(-3))
        if overden >= self.he_thresh:
            overden = self.he_thresh
        return self.he_amp * overden**self.he_exp

    def _get_temp(self, nebynh, ienergy, helium=0.24):
        """Compute temperature (in K) from internal energy and electron density.
        Uses: internal energy
              electron abundance per H atom (ne/nH)
              hydrogen mass fraction (0.76)
        Internal energy is in J/kg, internal gadget units, == 10^-10 ergs/g.
        Factor to convert U (J/kg) to T (K) : U = N k T / (γ - 1)
        T = U (γ-1) μ m_P / k_B
        where k_B is the Boltzmann constant
        γ is 5/3, the perfect gas constant
        m_P is the proton mass
        μ = 1 / (mean no. molecules per unit atomic weight)
          = 1 / (X + Y /4 + E)
        where E = Ne * X, and Y = (1-X).
        Can neglect metals as they are heavy.
        Leading contribution is from electrons, which is already included
        [+ Z / (12->16)] from metal species
        [+ Z/16*4 ] for OIV from electrons."""
        #convert U (J/kg) to T (K) : U = N k T / (γ - 1)
        #T = U (γ-1) μ m_P / k_B
        #where k_B is the Boltzmann constant
        #γ is 5/3, the perfect gas constant
        #m_P is the proton mass
        #μ is 1 / (mean no. molecules per unit atomic weight) calculated in loop.
        #Internal energy units are 10^-10 erg/g
        hy_mass = 1 - helium
        muienergy = 4 / (hy_mass * (3 + 4*nebynh) + 1)*ienergy*1e10
        #Boltzmann constant (cgs)
        boltzmann=1.38066e-16
        gamma=5./3
        #So for T in K, boltzmann in erg/K, internal energy has units of erg/g
        temp = (gamma-1) * self.protonmass / boltzmann * muienergy
        return temp
class RecombRatesCen92(object):
    """Recombination and collisional ionisation rate coefficients as a
    function of temperature (K), returned in cm^3/s.
    Fits are from Table 2 of Katz, Weinberg & Hernquist 1996
    (astro-ph/9509107), which follow Cen 1992. Illustris uses these rates."""
    def _collis_fit(self, temp, prefac, tcoll):
        """Common shape of the Cen 1992 collisional ionisation fits:
        prefac * sqrt(T) * exp(-tcoll/T) / (1 + sqrt(T/1e5)). Temp in K."""
        suppression = 1 + np.sqrt(temp/1e5)
        return prefac * np.sqrt(temp) * np.exp(-tcoll/temp) / suppression
    def alphaHp(self, temp):
        """Recombination rate coefficient for H+ (ionised hydrogen) in cm^3/s. Temp in K."""
        shape = np.power(temp/1000, 0.2) * (1 + np.power(temp/1e6, 0.7))
        return 8.4e-11 / np.sqrt(temp) / shape
    def alphaHep(self, temp):
        """Recombination rate coefficient for He+ (ionised helium) in cm^3/s. Temp in K."""
        return 1.5e-10 * np.power(temp, -0.6353)
    def alphad(self, temp):
        """Dielectronic recombination rate coefficient for He+ in cm^3/s. Temp in K."""
        boltz = np.exp(-4.7e5/temp) * (1 + 0.3*np.exp(-9.4e4/temp))
        return 1.9e-3 * boltz / np.power(temp, 1.5)
    def alphaHepp(self, temp):
        """Recombination rate coefficient for He++ in cm^3/s: four times the H+ rate. Temp in K."""
        return 4 * self.alphaHp(temp)
    def GammaeH0(self, temp):
        """Collisional ionisation rate for H0 in cm^3/s. Temp in K."""
        return self._collis_fit(temp, 5.85e-11, 157809.1)
    def GammaeHe0(self, temp):
        """Collisional ionisation rate for He0 in cm^3/s. Temp in K."""
        return self._collis_fit(temp, 2.38e-11, 285335.4)
    def GammaeHep(self, temp):
        """Collisional ionisation rate for He+ in cm^3/s. Temp in K."""
        return self._collis_fit(temp, 5.68e-12, 631515.0)
class RecombRatesVerner96(object):
    """Recombination rates and collisional ionization rates as a function of temperature.

    Recombination rates follow the fits of Verner & Ferland 1996
    (astro-ph/9509083); collisional ionization rates follow Voronov 1997
    (http://www.sciencedirect.com/science/article/pii/S0092640X97907324).
    In a very photoionised medium this changes the neutral hydrogen abundance
    by roughly 10% relative to Cen 1992. These are the rates used by Nyx.
    """
    def _Verner96Fit(self, temp, aa, bb, temp0, temp1):
        """Evaluate the Verner & Ferland 1996 (astro-ph/9509083) fitting function."""
        x0 = np.sqrt(temp / temp0)
        x1 = np.sqrt(temp / temp1)
        denom = x0 * (1 + x0) ** (1 - bb) * (1 + x1) ** (1 + bb)
        return aa / denom

    def alphaHp(self, temp):
        """Recombination rate for H+ (ionized hydrogen) in cm^3/s. Temp in K.
        The V&F 96 fit is accurate to < 1% in the worst case."""
        # Line 1 of V&F96 table 1.
        return self._Verner96Fit(temp, aa=7.982e-11, bb=0.748, temp0=3.148, temp1=7.036e+05)

    def alphaHep(self, temp):
        """Recombination rate for He+ (singly ionized helium) in cm^3/s. Temp in K.
        Accurate to ~2% for T < 1e6 and 5% for T < 1e10."""
        # VF96 give two fits (table 1, lines 2 and 3): one more accurate below
        # 1e6 K, the other valid up to 1e10 K. At 1e6 K they disagree by ~10%
        # (they cross near T ~ 3e5 K), so rather than switching abruptly we
        # blend linearly in a +/- 1e5 K window around 7e5 K.
        low = self._Verner96Fit(temp, aa=3.294e-11, bb=0.6910, temp0=1.554e+01, temp1=3.676e+07)
        high = self._Verner96Fit(temp, aa=9.356e-10, bb=0.7892, temp0=4.266e-02, temp1=4.677e+06)
        deltat = 1e5
        lower = 7e5 - deltat
        upper = 7e5 + deltat
        blended = (low * (upper - temp) + high * (temp - lower)) / (2 * deltat)
        return (temp < lower) * low + (temp > upper) * high + (upper > temp) * (temp > lower) * blended

    def alphad(self, temp):
        """Dielectronic recombination rate for He+ in cm^3/s. Temp in K.
        Aldrovandi & Pequignot 73 value (as used in Nyx, Sherwood and Cen 1992),
        corrected by the Burgess & Tworkowski 1976 (fig 1) factor of 0.65,
        with a slightly more accurate exponent."""
        tail = 1 + 0.3 * np.exp(-9.4e4 / temp)
        return 1.23e-3 / np.power(temp, 1.5) * np.exp(-4.72e5 / temp) * tail

    def alphaHepp(self, temp):
        """Recombination rate for He++ (doubly ionized helium) in cm^3/s.
        Accurate to 2%. Temp in K."""
        # Line 4 of V&F96 table 1.
        return self._Verner96Fit(temp, aa=1.891e-10, bb=0.7524, temp0=9.370, temp1=2.774e6)

    def _Voronov96Fit(self, temp, dE, PP, AA, XX, KK):
        """Evaluate eq. 1 of Voronov 1997 for collisional ionization rates.
        The fit is accurate to 10%, though the underlying data only to 50%."""
        bolevk = 8.61734e-5  # Boltzmann constant in units of eV/K
        uu = dE / (bolevk * temp)
        return AA * (1 + PP * np.sqrt(uu)) / (XX + uu) * uu ** KK * np.exp(-uu)

    def GammaeH0(self, temp):
        """Collisional ionization rate for H0 in cm^3/s. Temp in K. Voronov 97, Table 1."""
        return self._Voronov96Fit(temp, 13.6, 0, 0.291e-07, 0.232, 0.39)

    def GammaeHe0(self, temp):
        """Collisional ionization rate for He0 in cm^3/s. Temp in K. Voronov 97, Table 1."""
        return self._Voronov96Fit(temp, 24.6, 0, 0.175e-07, 0.180, 0.35)

    def GammaeHep(self, temp):
        """Collisional ionization rate for He+ in cm^3/s. Temp in K. Voronov 97, Table 1."""
        return self._Voronov96Fit(temp, 54.4, 1, 0.205e-08, 0.265, 0.25)
class RecombRatesBadnell(RecombRatesVerner96):
    """Recombination rates as a function of temperature, from the fits on
    Badnell's website: http://amdpp.phys.strath.ac.uk/tamoc/RR/#partial.
    Collisional ionization rates are inherited from RecombRatesVerner96."""
    def _RecombRateFit_lowcharge_ion(self, temp, aa, bb, cc, temp0, temp1, temp2):
        """V&F 96-style fitting function with a temperature-dependent exponent,
        as used for the partial rates at
        http://amdpp.phys.strath.ac.uk/tamoc/RR/#partial."""
        x0 = np.sqrt(temp / temp0)
        x1 = np.sqrt(temp / temp1)
        exponent = bb + cc * np.exp(-temp2 / temp)
        return aa / (x0 * (1 + x0) ** (1 - exponent) * (1 + x1) ** (1 + exponent))

    def alphaHp(self, temp):
        """Recombination rate for H+ (ionized hydrogen) in cm^3/s. Temp in K."""
        return self._Verner96Fit(temp, aa=8.318e-11, bb=0.7472, temp0=2.965, temp1=7.001e5)

    def alphaHep(self, temp):
        """Recombination rate for He+ (singly ionized helium) in cm^3/s. Temp in K."""
        return self._Verner96Fit(temp, aa=1.818E-10, bb=0.7492, temp0=10.17, temp1=2.786e6)

    def alphaHepp(self, temp):
        """Recombination rate for He++ (doubly ionized helium) in cm^3/s. Temp in K."""
        return self._RecombRateFit_lowcharge_ion(temp, aa=5.235E-11, bb=0.6988, cc=0.0829, temp0=7.301, temp1=4.475e6, temp2=1.682e5)
class PhotoRates(object):
    """Photoionization and photoheating rates for the H and He species.

    Implements eq. 29 of KWH 96: the rates are loaded from a TREECOOL table
    and interpolated in log10(1+z) with splines.
    """
    def __init__(self, treecool_file="data/TREECOOL_ep_2018p"):
        # TREECOOL table columns:
        #   log_10(1+z), Gamma_HI, Gamma_HeI, Gamma_HeII, Qdot_HI, Qdot_HeI, Qdot_HeII
        # where 'Gamma' is the photoionization rate (s^-1) and 'Qdot' the
        # photoheating rate (erg s^-1).
        try:
            data = np.loadtxt(treecool_file)
        except OSError:
            # Fall back to a path relative to this module's directory.
            treefile = os.path.join(os.path.dirname(os.path.realpath(__file__)), treecool_file)
            data = np.loadtxt(treefile)
        log1z_tab = data[:, 0]
        gamma_tab = data[:, 1:4]
        heat_tab = data[:, 4:7]
        assert np.shape(log1z_tab)[0] == np.shape(gamma_tab)[0]
        self.Gamma_HI = interp.InterpolatedUnivariateSpline(log1z_tab, gamma_tab[:, 0])
        self.Gamma_HeI = interp.InterpolatedUnivariateSpline(log1z_tab, gamma_tab[:, 1])
        self.Gamma_HeII = interp.InterpolatedUnivariateSpline(log1z_tab, gamma_tab[:, 2])
        self.Eps_HI = interp.InterpolatedUnivariateSpline(log1z_tab, heat_tab[:, 0])
        self.Eps_HeI = interp.InterpolatedUnivariateSpline(log1z_tab, heat_tab[:, 1])
        self.Eps_HeII = interp.InterpolatedUnivariateSpline(log1z_tab, heat_tab[:, 2])

    def gH0(self, redshift):
        """Photoionization rate for neutral hydrogen at the given redshift."""
        return self.Gamma_HI(np.log10(1 + redshift))

    def gHe0(self, redshift):
        """Photoionization rate for neutral helium at the given redshift."""
        return self.Gamma_HeI(np.log10(1 + redshift))

    def gHep(self, redshift):
        """Photoionization rate for singly ionized helium at the given redshift."""
        return self.Gamma_HeII(np.log10(1 + redshift))

    def epsH0(self, redshift):
        """Photoheating rate for neutral hydrogen at the given redshift."""
        return self.Eps_HI(np.log10(1 + redshift))

    def epsHe0(self, redshift):
        """Photoheating rate for neutral helium at the given redshift."""
        return self.Eps_HeI(np.log10(1 + redshift))

    def epsHep(self, redshift):
        """Photoheating rate for singly ionized helium at the given redshift."""
        return self.Eps_HeII(np.log10(1 + redshift))
class CoolingRatesKWH92(object):
    """The cooling rates from KWH92, in erg s^-1 cm^-3 (cgs).
    All rates are divided by the abundance of the ions involved in the interaction.
    So we are computing the cooling rate divided by n_e n_X. Temperatures in K.
    None of these rates are original to KWH92, but are taken from Cen 1992,
    and originally from older references. The hydrogen rates in particular are probably inaccurate.
    Cen 1992 modified (arbitrarily) the excitation and ionisation rates for high temperatures.
    There is no collisional excitation rate for He0 - not sure why.
    References:
        Black 1981, from Lotz 1967, Seaton 1959, Burgess & Seaton 1960.
        Recombination rates are from Spitzer 1978.
        Free-free: Spitzer 1978.
    Collisional excitation and ionisation cooling rates are merged.
    """
    def __init__(self, tcmb=2.7255, t5_corr=1e5, recomb=None):
        """tcmb: CMB temperature today, in K.
        t5_corr: temperature (K) at which the Cen 92 high-T suppression is imposed.
        recomb: object providing the recombination/collisional rates
            (defaults to RecombRatesCen92)."""
        self.tcmb = tcmb
        if recomb is None:
            self.recomb = RecombRatesCen92()
        else:
            self.recomb = recomb
        self.t5_corr = t5_corr
        #1 eV in ergs
        self.eVinergs = 1.60218e-12
        #boltzmann constant in erg/K
        self.kB = 1.38064852e-16
    def _t5(self, temp):
        """Commonly used Cen 1992 correction factor for large temperatures.
        This is implemented so that the cooling rates have the right
        asymptotic behaviour. However, Cen erroneously imposes this correction at T=1e5,
        which is too small: the Black 1981 rates these are based on should be good
        until 5e5 at least, where the correction factor has a 10% effect already.
        More modern tables thus impose it at T=5e7, which is still arbitrary but should be harmless.
        """
        # Bug fix: this previously read the bare name `t5_corr`, which is
        # undefined in this scope and raised NameError at runtime. Use the
        # value stored by __init__.
        return 1+(temp/self.t5_corr)**0.5
    def CollisionalExciteH0(self, temp):
        """Collisional excitation cooling rate for n_H0 and n_e. Gadget calls this BetaH0."""
        return 7.5e-19 * np.exp(-118348.0/temp) /self._t5(temp)
    def CollisionalExciteHeP(self, temp):
        """Collisional excitation cooling rate for n_He+ and n_e. Gadget calls this BetaHep."""
        return 5.54e-17 * temp**(-0.397)*np.exp(-473638./temp)/self._t5(temp)
    def CollisionalExciteHe0(self, temp):
        """This is listed in Cen 92 but neglected in KWH 97, presumably because it is very small."""
        #return 0
        return 9.1e-27 * temp**(-0.1687) * np.exp(-473638/temp) / self._t5(temp)
    def CollisionalIonizeH0(self, temp):
        """Collisional ionisation cooling rate for n_H0 and n_e. Gadget calls this GammaeH0."""
        #13.5984 eV is the ionisation potential of H0.
        return 13.5984 * self.eVinergs * self.recomb.GammaeH0(temp)
    def CollisionalIonizeHe0(self, temp):
        """Collisional ionisation cooling rate for n_He0 and n_e. Gadget calls this GammaeHe0."""
        return 24.5874 * self.eVinergs * self.recomb.GammaeHe0(temp)
    def CollisionalIonizeHeP(self, temp):
        """Collisional ionisation cooling rate for n_He+ and n_e. Gadget calls this GammaeHep."""
        return 54.417760 * self.eVinergs * self.recomb.GammaeHep(temp)
    def CollisionalH0(self, temp):
        """Total collisional cooling (excitation + ionisation) for H0."""
        return self.CollisionalExciteH0(temp) + self.CollisionalIonizeH0(temp)
    def CollisionalHe0(self, temp):
        """Total collisional cooling (excitation + ionisation) for He0."""
        return self.CollisionalExciteHe0(temp) + self.CollisionalIonizeHe0(temp)
    def CollisionalHeP(self, temp):
        """Total collisional cooling (excitation + ionisation) for He+."""
        return self.CollisionalExciteHeP(temp) + self.CollisionalIonizeHeP(temp)
    def RecombHp(self, temp):
        """Recombination cooling rate for H+ and e. Gadget calls this AlphaHp."""
        return 0.75 * self.kB * temp * self.recomb.alphaHp(temp)
    def RecombHeP(self, temp):
        """Recombination cooling rate for He+ and e. Gadget calls this AlphaHep.
        Includes the dielectronic contribution."""
        #I'm not sure why they use 0.75 kT as the free energy of an electron.
        #I would guess this is explained in Spitzer 1978.
        return 0.75 * self.kB * temp * self.recomb.alphaHep(temp)+ self._RecombDielect(temp)
    def RecombHePP(self, temp):
        """Recombination cooling rate for He++ and e. Gadget calls this AlphaHepp."""
        return 0.75 * self.kB * temp * self.recomb.alphaHepp(temp)
    def _RecombDielect(self, temp):
        """Dielectric recombination cooling rate for He+ and e. Gadget calls this Alphad."""
        #6.526e-11 erg: presumably the energy released per dielectronic
        #recombination. TODO confirm against Cen 92 / Black 81.
        return 6.526e-11*self.recomb.alphad(temp)
    def FreeFree(self, temp, zz):
        """Free-free cooling rate for electrons scattering on ions without being captured.
        Factors here are n_e and total ionized species:
            (FreeFree(zz=1)*(n_H+ + n_He+) + FreeFree(zz=2)*n_He++)"""
        return 1.426e-27*np.sqrt(temp)*zz**2*self._gff(temp,zz)
    def _gff(self, temp, zz):
        """Formula for the Gaunt factor. KWH takes this from Spitzer 1978.
        Note: this fit is independent of the charge; zz is accepted only for
        interface symmetry with subclasses that do use it."""
        _ = zz
        return 1.1+0.34*np.exp(-(5.5 - np.log10(temp))**2/3.)
    def InverseCompton(self, temp, redshift):
        """Cooling rate for inverse Compton from the microwave background.
        Multiply this only by n_e. Note the CMB temperature is hardcoded in KWH92 to 2.7."""
        tcmb_red = self.tcmb * (1+redshift)
        #Thompson cross-section in cm^2
        sigmat = 6.6524e-25
        #Radiation density constant, 4 sigma_stefan-boltzmann / c in erg cm^-3 K^-4
        rad_dens = 7.5657e-15
        #Electron mass in g
        me = 9.10938e-28
        #Speed of light in cm/s
        cc = 2.99792e10
        return 4 * sigmat * rad_dens / (me*cc) * tcmb_red**4 * self.kB * (temp - tcmb_red)
class CoolingRatesSherwood(CoolingRatesKWH92):
    """The cooling rates used in the Sherwood simulation, Bolton et al 2017, in erg s^-1 cm^-3 (cgs).
    Differences from KWH92 are updated recombination and collisional ionization rates
    (Verner & Ferland 96 / Voronov 97), and the use of a larger temperature
    correction factor than Cen 92.
    """
    def __init__(self, tcmb=2.7255, recomb=None):
        # Default to the Verner & Ferland 96 rates, constructing an *instance*
        # (the original passed the class object itself, which would break
        # every self.recomb.* call), and honour an explicitly supplied
        # rate object instead of silently ignoring it.
        if recomb is None:
            recomb = RecombRatesVerner96()
        # Bug fix: the unbound base-class __init__ call was missing `self`
        # and raised TypeError as soon as this class was instantiated.
        CoolingRatesKWH92.__init__(self, tcmb=tcmb, t5_corr=5e7, recomb=recomb)
class CoolingRatesNyx(CoolingRatesKWH92):
    """The cooling rates used in the Nyx paper Lukic 2014, 1406.6361, in erg s^-1 cm^-3 (cgs).
    All rates are divided by the abundance of the ions involved in the interaction.
    So we are computing the cooling rate divided by n_e n_X. Temperatures in K.
    Major differences from KWH are the use of the Scholz & Walter 1991
    hydrogen collisional cooling rates, a less aggressive high temperature correction for helium, and
    Shapiro & Kang 1987 for free free.
    Older Black 1981 recombination cooling rates are used!
    They use the recombination rates from Verner & Ferland 96, but do not change the cooling rates to match.
    Ditto the ionization rates from Voronov 1997: they should also use these rates for collisional ionisation,
    although this is harder because Sholz & Walter don't break their rates into ionization and excitation.
    References:
        Scholz & Walters 1991 (0.45% accuracy)
        Black 1981 (recombination and helium)
        Shapiro & Kang 1987
    """
    def __init__(self, tcmb=2.7255, recomb=None):
        # Bug fix: the unbound base-class __init__ call was missing `self`
        # and raised TypeError as soon as this class was instantiated.
        CoolingRatesKWH92.__init__(self, tcmb=tcmb, t5_corr=5e7, recomb=recomb)
    def CollisionalH0(self, temp):
        """Collisional cooling rate for n_H0 and n_e. Gadget calls this BetaH0 + GammaeH0.
        Formula from Eq. 23, Table 4 of Scholz & Walters, claimed good to 0.45 %.
        Note though that they have two datasets which differ by a factor of two.
        Differs from Cen 92 by a factor of two."""
        #Technically only good for T > 2000.
        y = np.log(temp)
        #Constant is 0.75/k_B in Rydberg
        Ryd = 2.1798741e-11
        tot = -0.75/self.kB*Ryd/temp
        coeffslowT = [213.7913, 113.9492, 25.06062, 2.762755, 0.1515352, 3.290382e-3]
        coeffshighT = [271.25446, 98.019455, 14.00728, 0.9780842, 3.356289e-2, 4.553323e-4]
        #Select the low- or high-temperature coefficient set per element,
        #then evaluate the polynomial in (-log T).
        for j in range(6):
            tot += ((temp < 1e5)*coeffslowT[j]+(temp >=1e5)*coeffshighT[j])*(-y)**j
        return 1e-20 * np.exp(tot)
    def RecombHp(self, temp):
        """Recombination cooling rate for H+ and e. Gadget calls this AlphaHp.
        Differs by O(10%) until 3x10^6."""
        return 2.851e-27 * np.sqrt(temp) * (5.914 - 0.5 * np.log(temp) + 0.01184 * temp**(1./3))
    def RecombHePP(self, temp):
        """Recombination cooling rate for He++ and e. Gadget calls this AlphaHepp.
        Differs from Cen 92 by 10% until ~10^7."""
        return 1.140e-26 * np.sqrt(temp) * (6.607 - 0.5 * np.log(temp) + 7.459e-3 * temp**(1./3))
    def _gff(self, temp, zz):
        """Formula for the Gaunt factor from Shapiro & Kang 1987. ZZ is 1 for H+ and He+ and 2 for He++.
        This is almost identical to the KWH rate but not continuous."""
        #This is not continuous. Check the original reference.
        little = (temp/zz**2 <= 3.2e5)
        lt = np.log10(temp/zz**2)
        return little * (0.79464 + 0.1243*lt) + np.logical_not(little) * ( 2.13164 - 0.1240 * lt)
| [
"numpy.ones_like",
"numpy.shape",
"numpy.log10",
"numpy.sqrt",
"numpy.power",
"numpy.log",
"numpy.logical_not",
"numpy.exp",
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.loadtxt"
] | [((3983, 4033), 'scipy.interpolate.InterpolatedUnivariateSpline', 'interp.InterpolatedUnivariateSpline', (['zz', 'gray_opac'], {}), '(zz, gray_opac)\n', (4018, 4033), True, 'import scipy.interpolate as interp\n'), ((15357, 15378), 'numpy.sqrt', 'np.sqrt', (['(temp / temp0)'], {}), '(temp / temp0)\n', (15364, 15378), True, 'import numpy as np\n'), ((15395, 15416), 'numpy.sqrt', 'np.sqrt', (['(temp / temp1)'], {}), '(temp / temp1)\n', (15402, 15416), True, 'import numpy as np\n'), ((19238, 19259), 'numpy.sqrt', 'np.sqrt', (['(temp / temp0)'], {}), '(temp / temp0)\n', (19245, 19259), True, 'import numpy as np\n'), ((19276, 19297), 'numpy.sqrt', 'np.sqrt', (['(temp / temp1)'], {}), '(temp / temp1)\n', (19283, 19297), True, 'import numpy as np\n'), ((21120, 21185), 'scipy.interpolate.InterpolatedUnivariateSpline', 'interp.InterpolatedUnivariateSpline', (['redshifts', 'photo_rates[:, 0]'], {}), '(redshifts, photo_rates[:, 0])\n', (21155, 21185), True, 'import scipy.interpolate as interp\n'), ((21210, 21275), 'scipy.interpolate.InterpolatedUnivariateSpline', 'interp.InterpolatedUnivariateSpline', (['redshifts', 'photo_rates[:, 1]'], {}), '(redshifts, photo_rates[:, 1])\n', (21245, 21275), True, 'import scipy.interpolate as interp\n'), ((21301, 21366), 'scipy.interpolate.InterpolatedUnivariateSpline', 'interp.InterpolatedUnivariateSpline', (['redshifts', 'photo_rates[:, 2]'], {}), '(redshifts, photo_rates[:, 2])\n', (21336, 21366), True, 'import scipy.interpolate as interp\n'), ((21388, 21452), 'scipy.interpolate.InterpolatedUnivariateSpline', 'interp.InterpolatedUnivariateSpline', (['redshifts', 'photo_heat[:, 0]'], {}), '(redshifts, photo_heat[:, 0])\n', (21423, 21452), True, 'import scipy.interpolate as interp\n'), ((21475, 21539), 'scipy.interpolate.InterpolatedUnivariateSpline', 'interp.InterpolatedUnivariateSpline', (['redshifts', 'photo_heat[:, 1]'], {}), '(redshifts, photo_heat[:, 1])\n', (21510, 21539), True, 'import scipy.interpolate as interp\n'), ((21563, 
21627), 'scipy.interpolate.InterpolatedUnivariateSpline', 'interp.InterpolatedUnivariateSpline', (['redshifts', 'photo_heat[:, 2]'], {}), '(redshifts, photo_heat[:, 2])\n', (21598, 21627), True, 'import scipy.interpolate as interp\n'), ((21721, 21743), 'numpy.log10', 'np.log10', (['(1 + redshift)'], {}), '(1 + redshift)\n', (21729, 21743), True, 'import numpy as np\n'), ((21880, 21902), 'numpy.log10', 'np.log10', (['(1 + redshift)'], {}), '(1 + redshift)\n', (21888, 21902), True, 'import numpy as np\n'), ((22034, 22056), 'numpy.log10', 'np.log10', (['(1 + redshift)'], {}), '(1 + redshift)\n', (22042, 22056), True, 'import numpy as np\n'), ((22195, 22217), 'numpy.log10', 'np.log10', (['(1 + redshift)'], {}), '(1 + redshift)\n', (22203, 22217), True, 'import numpy as np\n'), ((22362, 22384), 'numpy.log10', 'np.log10', (['(1 + redshift)'], {}), '(1 + redshift)\n', (22370, 22384), True, 'import numpy as np\n'), ((22524, 22546), 'numpy.log10', 'np.log10', (['(1 + redshift)'], {}), '(1 + redshift)\n', (22532, 22546), True, 'import numpy as np\n'), ((30270, 30282), 'numpy.log', 'np.log', (['temp'], {}), '(temp)\n', (30276, 30282), True, 'import numpy as np\n'), ((31548, 31572), 'numpy.log10', 'np.log10', (['(temp / zz ** 2)'], {}), '(temp / zz ** 2)\n', (31556, 31572), True, 'import numpy as np\n'), ((10079, 10095), 'numpy.ones_like', 'np.ones_like', (['nh'], {}), '(nh)\n', (10091, 10095), True, 'import numpy as np\n'), ((13713, 13735), 'numpy.power', 'np.power', (['temp', '(0.6353)'], {}), '(temp, 0.6353)\n', (13721, 13735), True, 'import numpy as np\n'), ((18129, 18140), 'numpy.exp', 'np.exp', (['(-UU)'], {}), '(-UU)\n', (18135, 18140), True, 'import numpy as np\n'), ((20747, 20772), 'numpy.loadtxt', 'np.loadtxt', (['treecool_file'], {}), '(treecool_file)\n', (20757, 20772), True, 'import numpy as np\n'), ((30701, 30712), 'numpy.exp', 'np.exp', (['tot'], {}), '(tot)\n', (30707, 30712), True, 'import numpy as np\n'), ((13514, 13540), 'numpy.power', 'np.power', (['(temp / 
1000)', '(0.2)'], {}), '(temp / 1000, 0.2)\n', (13522, 13540), True, 'import numpy as np\n'), ((13545, 13576), 'numpy.power', 'np.power', (['(temp / 1000000.0)', '(0.7)'], {}), '(temp / 1000000.0, 0.7)\n', (13553, 13576), True, 'import numpy as np\n'), ((13904, 13928), 'numpy.exp', 'np.exp', (['(-470000.0 / temp)'], {}), '(-470000.0 / temp)\n', (13910, 13928), True, 'import numpy as np\n'), ((14254, 14278), 'numpy.exp', 'np.exp', (['(-157809.1 / temp)'], {}), '(-157809.1 / temp)\n', (14260, 14278), True, 'import numpy as np\n'), ((14283, 14307), 'numpy.sqrt', 'np.sqrt', (['(temp / 100000.0)'], {}), '(temp / 100000.0)\n', (14290, 14307), True, 'import numpy as np\n'), ((14445, 14469), 'numpy.exp', 'np.exp', (['(-285335.4 / temp)'], {}), '(-285335.4 / temp)\n', (14451, 14469), True, 'import numpy as np\n'), ((14474, 14498), 'numpy.sqrt', 'np.sqrt', (['(temp / 100000.0)'], {}), '(temp / 100000.0)\n', (14481, 14498), True, 'import numpy as np\n'), ((14636, 14660), 'numpy.exp', 'np.exp', (['(-631515.0 / temp)'], {}), '(-631515.0 / temp)\n', (14642, 14660), True, 'import numpy as np\n'), ((14665, 14689), 'numpy.sqrt', 'np.sqrt', (['(temp / 100000.0)'], {}), '(temp / 100000.0)\n', (14672, 14689), True, 'import numpy as np\n'), ((17458, 17482), 'numpy.exp', 'np.exp', (['(-472000.0 / temp)'], {}), '(-472000.0 / temp)\n', (17464, 17482), True, 'import numpy as np\n'), ((19317, 19338), 'numpy.exp', 'np.exp', (['(-temp2 / temp)'], {}), '(-temp2 / temp)\n', (19323, 19338), True, 'import numpy as np\n'), ((20912, 20932), 'numpy.loadtxt', 'np.loadtxt', (['treefile'], {}), '(treefile)\n', (20922, 20932), True, 'import numpy as np\n'), ((21045, 21064), 'numpy.shape', 'np.shape', (['redshifts'], {}), '(redshifts)\n', (21053, 21064), True, 'import numpy as np\n'), ((21071, 21092), 'numpy.shape', 'np.shape', (['photo_rates'], {}), '(photo_rates)\n', (21079, 21092), True, 'import numpy as np\n'), ((24528, 24552), 'numpy.exp', 'np.exp', (['(-118348.0 / temp)'], {}), '(-118348.0 / 
temp)\n', (24534, 24552), True, 'import numpy as np\n'), ((24747, 24771), 'numpy.exp', 'np.exp', (['(-473638.0 / temp)'], {}), '(-473638.0 / temp)\n', (24753, 24771), True, 'import numpy as np\n'), ((24989, 25011), 'numpy.exp', 'np.exp', (['(-473638 / temp)'], {}), '(-473638 / temp)\n', (24995, 25011), True, 'import numpy as np\n'), ((30893, 30906), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (30900, 30906), True, 'import numpy as np\n'), ((31154, 31167), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (31161, 31167), True, 'import numpy as np\n'), ((31617, 31639), 'numpy.logical_not', 'np.logical_not', (['little'], {}), '(little)\n', (31631, 31639), True, 'import numpy as np\n'), ((13498, 13511), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (13505, 13511), True, 'import numpy as np\n'), ((13883, 13902), 'numpy.power', 'np.power', (['temp', '(1.5)'], {}), '(temp, 1.5)\n', (13891, 13902), True, 'import numpy as np\n'), ((13931, 13954), 'numpy.exp', 'np.exp', (['(-94000.0 / temp)'], {}), '(-94000.0 / temp)\n', (13937, 13954), True, 'import numpy as np\n'), ((14238, 14251), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (14245, 14251), True, 'import numpy as np\n'), ((14429, 14442), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (14436, 14442), True, 'import numpy as np\n'), ((14620, 14633), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (14627, 14633), True, 'import numpy as np\n'), ((17437, 17456), 'numpy.power', 'np.power', (['temp', '(1.5)'], {}), '(temp, 1.5)\n', (17445, 17456), True, 'import numpy as np\n'), ((17486, 17509), 'numpy.exp', 'np.exp', (['(-94000.0 / temp)'], {}), '(-94000.0 / temp)\n', (17492, 17509), True, 'import numpy as np\n'), ((27376, 27389), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (27383, 27389), True, 'import numpy as np\n'), ((30924, 30936), 'numpy.log', 'np.log', (['temp'], {}), '(temp)\n', (30930, 30936), True, 'import numpy as np\n'), ((31185, 31197), 'numpy.log', 'np.log', (['temp'], 
{}), '(temp)\n', (31191, 31197), True, 'import numpy as np\n'), ((18097, 18108), 'numpy.sqrt', 'np.sqrt', (['UU'], {}), '(UU)\n', (18104, 18108), True, 'import numpy as np\n'), ((27578, 27592), 'numpy.log10', 'np.log10', (['temp'], {}), '(temp)\n', (27586, 27592), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Author:: <NAME> (mailto:<EMAIL>)
"""
Simple Flask Server to Expose Credentials
"""
from flask import Flask, jsonify, redirect, render_template, request, session, url_for
from splitwise import Splitwise
from adjuftments.config import SplitwiseConfig
app = Flask(__name__)
# NOTE(review): hard-coded session secret. Acceptable only for a local
# one-off credential helper; sessions are forgeable if this server is
# ever exposed to a network.
app.secret_key = "RandomSecretString"
@app.route("/")
def home():
if 'access_token' in session:
return redirect(url_for("credentials"))
return render_template("home.html")
@app.route("/login")
def login():
splitwise_object = Splitwise(consumer_key=SplitwiseConfig.SPLITWISE_CONSUMER_KEY,
consumer_secret=SplitwiseConfig.SPLITWISE_CONSUMER_SECRET)
url, secret = splitwise_object.getAuthorizeURL()
session['secret'] = secret
return redirect(url)
@app.route("/authorize")
def authorize():
if 'secret' not in session:
return redirect(url_for("home"))
oauth_token = request.args.get('oauth_token')
oauth_verifier = request.args.get('oauth_verifier')
splitwise_object = Splitwise(consumer_key=SplitwiseConfig.SPLITWISE_CONSUMER_KEY,
consumer_secret=SplitwiseConfig.SPLITWISE_CONSUMER_SECRET)
access_token = splitwise_object.getAccessToken(oauth_token, session['secret'], oauth_verifier)
session['access_token'] = access_token
return redirect(url_for("credentials"))
@app.route("/credentials")
def credentials():
credential_dict = dict(SPLITWISE_CONSUMER_KEY=SplitwiseConfig.SPLITWISE_CONSUMER_KEY,
SPLITWISE_CONSUMER_SECRET=SplitwiseConfig.SPLITWISE_CONSUMER_SECRET,
SPLITWISE_OAUTH_TOKEN=session["access_token"]["oauth_token"],
SPLITWISE_OAUTH_SECRET=session["access_token"]["oauth_token_secret"])
return jsonify(credential_dict)
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
| [
"flask.render_template",
"flask.request.args.get",
"flask.Flask",
"flask.url_for",
"flask.redirect",
"splitwise.Splitwise",
"flask.jsonify"
] | [((289, 304), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (294, 304), False, 'from flask import Flask, jsonify, redirect, render_template, request, session, url_for\n'), ((466, 494), 'flask.render_template', 'render_template', (['"""home.html"""'], {}), "('home.html')\n", (481, 494), False, 'from flask import Flask, jsonify, redirect, render_template, request, session, url_for\n'), ((554, 679), 'splitwise.Splitwise', 'Splitwise', ([], {'consumer_key': 'SplitwiseConfig.SPLITWISE_CONSUMER_KEY', 'consumer_secret': 'SplitwiseConfig.SPLITWISE_CONSUMER_SECRET'}), '(consumer_key=SplitwiseConfig.SPLITWISE_CONSUMER_KEY,\n consumer_secret=SplitwiseConfig.SPLITWISE_CONSUMER_SECRET)\n', (563, 679), False, 'from splitwise import Splitwise\n'), ((804, 817), 'flask.redirect', 'redirect', (['url'], {}), '(url)\n', (812, 817), False, 'from flask import Flask, jsonify, redirect, render_template, request, session, url_for\n'), ((953, 984), 'flask.request.args.get', 'request.args.get', (['"""oauth_token"""'], {}), "('oauth_token')\n", (969, 984), False, 'from flask import Flask, jsonify, redirect, render_template, request, session, url_for\n'), ((1006, 1040), 'flask.request.args.get', 'request.args.get', (['"""oauth_verifier"""'], {}), "('oauth_verifier')\n", (1022, 1040), False, 'from flask import Flask, jsonify, redirect, render_template, request, session, url_for\n'), ((1064, 1189), 'splitwise.Splitwise', 'Splitwise', ([], {'consumer_key': 'SplitwiseConfig.SPLITWISE_CONSUMER_KEY', 'consumer_secret': 'SplitwiseConfig.SPLITWISE_CONSUMER_SECRET'}), '(consumer_key=SplitwiseConfig.SPLITWISE_CONSUMER_KEY,\n consumer_secret=SplitwiseConfig.SPLITWISE_CONSUMER_SECRET)\n', (1073, 1189), False, 'from splitwise import Splitwise\n'), ((1836, 1860), 'flask.jsonify', 'jsonify', (['credential_dict'], {}), '(credential_dict)\n', (1843, 1860), False, 'from flask import Flask, jsonify, redirect, render_template, request, session, url_for\n'), ((1381, 1403), 'flask.url_for', 
'url_for', (['"""credentials"""'], {}), "('credentials')\n", (1388, 1403), False, 'from flask import Flask, jsonify, redirect, render_template, request, session, url_for\n'), ((431, 453), 'flask.url_for', 'url_for', (['"""credentials"""'], {}), "('credentials')\n", (438, 453), False, 'from flask import Flask, jsonify, redirect, render_template, request, session, url_for\n'), ((918, 933), 'flask.url_for', 'url_for', (['"""home"""'], {}), "('home')\n", (925, 933), False, 'from flask import Flask, jsonify, redirect, render_template, request, session, url_for\n')] |
'''
# Config
'''
import cv2
import numpy as np
import platform
import time
import sys
##
## Open CV Variables
##
# Demo mode: render the OpenCV debug/visualisation output.
DEMO_MODE = True
# Font used when annotating the debug/test output frames.
FONT = cv2.FONT_HERSHEY_SIMPLEX
# Minimum detection area per sign type (presumably contour area in px^2 —
# TODO confirm against the detector code).
AREA_SIZE_STOP = 30
AREA_SIZE_TURN = 35
AREA_SIZE_PARK = 75
AREA_SIZE_TRAFFIC = 25
# Detections larger than this are rejected.
MAX_AREA_SIZE = 2000
# Kernel sizes for the different detectors.
KERNEL_SIZE = 3
TRAFFIC_KERNEL_SIZE = 3
STOP_KERNEL_SIZE = 9
# Traffic-signal threshold counters: counts required before a light colour
# is considered detected.
COUNTER_THRESHOLD_GREEN = 20
COUNTER_THRESHOLD_RED = 25
COUNTER_THRESHOLD_AMBER = 15
# Define what colour space conversion we are working with.
# For some reason Jetson Nano (gstreamer) needs RGB instead of BGR.
# NOTE(review): the name `os` shadows the stdlib module name (only
# `platform` is imported here, so nothing breaks, but it is confusing).
os = platform.system()
if os == 'Linux': # Jetson
    COLOUR_CONVERT = cv2.COLOR_RGB2HSV
elif os == 'Windows': # Testing
    COLOUR_CONVERT = cv2.COLOR_BGR2HSV
elif os == 'Darwin':
    COLOUR_CONVERT = cv2.COLOR_BGR2HSV
## Error checking (valid_range) function
# Show the detection area in the output image.
DRAW_RANGE = True
# Vertical range for detection, as fractions of the frame height
# (5 = 1/5, 2 = 1/2, 1 = whole frame).
VR_TOP = 5 # 1/5 - close to the top but not the roof
VR_BOTTOM = 2 # 1/2 - halfway
##
## Donkey Car Variables
##
# Threshold: How many values in set before running code. (set 0 to always run)
# Size: How many values to keep track of, more values opens potential for higher error rate (min 3, default 10)
DK_COUNTER_THRESHOLD = 4 # will take (+1) of value
DK_COUNTER_SIZE = 10 # 1 = ~0.05 secs, 20 = 1 sec
# Delay: wait this many cycles before executing the command (set to 0 for no delay)
# Runtime: wait this many cycles until AutoPilot can run again
DK_ACTION_DELAY = 10 # 10 = 0.5s, 20 = 1 sec
DK_ACTION_RUNTIME = 60 # 60 = 3.0s, 20 = 1 sec
# Show the debug output for the donkey car part.
DK_SHOW_TEXT_DEBUG = True
| [
"platform.system"
] | [((692, 709), 'platform.system', 'platform.system', ([], {}), '()\n', (707, 709), False, 'import platform\n')] |
# Developed by <NAME>
# Last Modified 13/04/19 16:04.
# Copyright (c) 2019 <NAME> and <NAME>
import pytest
from decouple import config
from django.contrib.auth.models import User
from django.test import LiveServerTestCase, TestCase
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.wait import WebDriverWait
# @pytest.mark.selenium
class SeleniumTestCase(LiveServerTestCase):
    """
    A base test case for Selenium, providing helper methods for generating
    clients and logging in profiles.
    """

    def setUp(self):
        """Start a Firefox driver (headless when MOZ_HEADLESS=1) for each test."""
        options = Options()
        # Bug fix: decouple's config() returns the raw environment *string*
        # unless a cast is supplied, so `config('MOZ_HEADLESS', 0) == 1`
        # compared "1" to 1 and was never true. Cast to int explicitly.
        if config('MOZ_HEADLESS', default=0, cast=int) == 1:
            options.add_argument('-headless')
        self.browser = CustomWebDriver(firefox_options=options)

    def tearDown(self):
        """Quit the browser so geckodriver processes do not leak between tests."""
        self.browser.quit()
class CustomWebDriver(webdriver.Firefox):
    """A Firefox WebDriver extended with a few convenience helpers."""

    def find_css(self, css_selector):
        """Find elements by CSS selector.

        Returns the single element when exactly one matches, a list when
        several match, and raises NoSuchElementException when none do.
        """
        matches = self.find_elements_by_css_selector(css_selector)
        if not matches:
            raise NoSuchElementException(css_selector)
        return matches[0] if len(matches) == 1 else matches

    def wait_for_css(self, css_selector, timeout=7):
        """Block (up to *timeout* seconds) until find_css succeeds."""
        waiter = WebDriverWait(self, timeout)
        return waiter.until(lambda drv: drv.find_css(css_selector))
return WebDriverWait(self, timeout).until(lambda driver : driver.find_css(css_selector)) | [
"selenium.webdriver.support.wait.WebDriverWait",
"selenium.webdriver.firefox.options.Options",
"decouple.config",
"selenium.common.exceptions.NoSuchElementException"
] | [((680, 689), 'selenium.webdriver.firefox.options.Options', 'Options', ([], {}), '()\n', (687, 689), False, 'from selenium.webdriver.firefox.options import Options\n'), ((701, 726), 'decouple.config', 'config', (['"""MOZ_HEADLESS"""', '(0)'], {}), "('MOZ_HEADLESS', 0)\n", (707, 726), False, 'from decouple import config\n'), ((1300, 1336), 'selenium.common.exceptions.NoSuchElementException', 'NoSuchElementException', (['css_selector'], {}), '(css_selector)\n', (1322, 1336), False, 'from selenium.common.exceptions import NoSuchElementException\n'), ((1469, 1497), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['self', 'timeout'], {}), '(self, timeout)\n', (1482, 1497), False, 'from selenium.webdriver.support.wait import WebDriverWait\n')] |
"""
General tests that concern all recipes
"""
import os
import sys
from ._base import mock, RecipeTests, test_project
# we use the very simple manage.Recipe to test BaseRecipe functionalities
from djangorecipebook.recipes import manage
class GeneralRecipeTests(RecipeTests):
recipe_class = manage.Recipe
recipe_name = 'manage'
recipe_options = {'recipe': 'djangorecipebook:manage'}
@mock.patch('zc.recipe.egg.egg.Scripts.working_set',
return_value=(None, []))
def test_create_script_projectdir(self, working_set):
# When a project dir is specified, it should be added to sys.path
self.init_recipe({'project-dir': test_project})
self.recipe.install()
to_find_in = os.path.join(self.buildout_dir, test_project)
if sys.platform == 'win32' and sys.version_info >= (3, 4):
to_find_in = to_find_in.lower()
self.assertIn(to_find_in,
self.script_cat('manage'))
@mock.patch('zc.recipe.egg.egg.Scripts.working_set',
return_value=(None, []))
def test_create_script_extra_paths(self, working_set):
# When extra paths are specified, they should be added to sys.path
# we use relative paths so that the test is valid on any platform
extra_paths = ('my/first/extra/path', 'my/second/extra/path')
# mimick buildout.cfg file formatting
self.init_recipe({'extra-paths': '\n '.join(extra_paths)})
self.recipe.install()
manage_script = self.script_cat('manage')
for p in extra_paths:
self.assertIn(os.path.normpath(p), manage_script)
@mock.patch('zc.recipe.egg.egg.Scripts.working_set',
return_value=(None, []))
def test_create_manage_script_with_initialization(self, working_set):
# When an init code is specified, it should be added to the script
self.init_recipe({'initialization': 'import os\nassert True'})
self.recipe.install()
self.assertIn('import os\nassert True\n'
'added_settings = {}\n\n'
'import djangorecipebook',
self.script_cat('manage'))
@mock.patch('zc.recipe.egg.egg.Scripts.working_set',
return_value=(None, []))
def test_create_manage_script_with_args(self, working_set):
# Default install of a test script, check that the call to
# djangorecipebook.test.main is present and has the apps names in the
# arguments
args = ('-v', '--no-input')
self.init_recipe({
'command': 'command',
'args': '\n '.join(args)
})
self.recipe.install()
manage_script = self.script_path('manage')
script_cat = self.script_cat(manage_script)
self.assertIn("djangorecipebook.scripts.manage.main(added_settings, "
"'command', %s)"
% ', '.join(["'%s'" % arg for arg in args]), script_cat)
self.assertIn('added_settings = {', script_cat)
@mock.patch('zc.recipe.egg.egg.Scripts.working_set',
return_value=(None, []))
def test_create_manage_script_with_envvars(self, working_set):
# Install of a test script with custom environment variables
self.init_recipe({'envvars': 'MYENVVAR = value'})
self.recipe.install()
manage_script = self.script_cat('manage')
self.assertIn('import os', manage_script)
self.assertIn("os.environ['MYENVVAR'] = 'value'", manage_script)
| [
"os.path.normpath",
"os.path.join"
] | [((764, 809), 'os.path.join', 'os.path.join', (['self.buildout_dir', 'test_project'], {}), '(self.buildout_dir, test_project)\n', (776, 809), False, 'import os\n'), ((1650, 1669), 'os.path.normpath', 'os.path.normpath', (['p'], {}), '(p)\n', (1666, 1669), False, 'import os\n')] |
import pandas
from pandas import Timestamp
EXPECTED_CONFIG = {
"sheet_name": "df_dropper",
"sheet_key": "sample",
"target_schema": "sand",
"target_table": "bb_test_sheetwork",
"columns": [
{"name": "col_a", "datatype": "int"},
{"name": "col_b", "datatype": "varchar"},
{"name": "col_one", "datatype": "varchar"},
{"name": "renamed_col", "identifier": "long ass name", "datatype": "varchar"},
],
"excluded_columns": ["to_exclude"],
}
EXPECTED_DEV_TEST_PROFILE = {
"db_type": "snowflake",
"account": "a",
"user": "b",
"password": "c",
"role": "d",
"database": "e",
"warehouse": "f",
"schema": "g",
"guser": "<EMAIL>",
}
NO_COLS_EXPECTED_CONFIG = {
"sheet_name": "no_cols",
"sheet_key": "sample",
"target_schema": "sand",
"target_table": "bb_test_sheetwork",
}
EXPECTED_SHEETWORK_PROJECT = {
"name": "sheetwork_test",
"target_schema": "sand",
"always_create_table": True,
"always_create_schema": True,
"destructive_create_table": True,
}
EXPECTED_SHEETWORK_PROJECT_ALL_CREATE = {
"name": "sheetwork_test",
"target_schema": "sand",
"always_create_objects": True,
"destructive_create_table": True,
}
EXPECTED_SHEETWORK_PROJECT_DEPRECATED = {
"name": "sheetwork_test",
"target_schema": "sand",
"always_create": True,
}
DIRTY_DF = {
"col_a": [1, 2, 32],
"col b": ["as . ", "b", " c"],
"1. ??col_one": ["aa", "bb", "cc"],
"": ["q", "q", "q"],
"col_1": [1, 2, 33],
"long ass name": ["foo", "bar", "fizz"],
"col_with_empty_string": ["1", "", "2"],
}
TO_CAST_DF = {
"col_int": ["1", "2", "32"],
"col_varchar": ["foo", "bar", "fizz"],
"created_date": ["2019/01/01", "2019/01/02", "2019/01/03"],
"col_bool": ["false", "False", "true"],
"col_numeric": ["1.2", "1.3", "1"],
}
CAST_DF = {
# this non conversion to int is intentional until we have a better fix see #205, #204
"col_int": {0: "1", 1: "2", 2: "32"},
"col_varchar": {0: "foo", 1: "bar", 2: "fizz"},
"created_date": {
0: Timestamp("2019-01-01 00:00:00"),
1: Timestamp("2019-01-02 00:00:00"),
2: Timestamp("2019-01-03 00:00:00"),
},
"col_bool": {0: False, 1: False, 2: True},
"col_numeric": {0: 1.2, 1: 1.3, 2: 1},
}
CASING_DF = {
"CamelCasedCol": [1, 2, 3],
"snake_cased_col": [1, 2, 3],
}
SNAKE_CASED_COLS = ["camel_cased_col", "snake_cased_col"]
CAMEL_CASED_COLS = ["CamelCasedCol", "SnakeCasedCol"]
CLEAN_DF = {
"col_a": {0: 1, 1: 2, 2: 32},
"col_b": {0: "as .", 1: "b", 2: "c"},
"1_col_one": {0: "aa", 1: "bb", 2: "cc"},
"col_1": {0: 1, 1: 2, 2: 33},
"long_ass_name": {0: "foo", 1: "bar", 2: "fizz"},
"col_with_empty_string": {0: "1", 1: "", 2: "2"},
}
RENAMED_DF = {
"col_a": {0: 1, 1: 2, 2: 32},
"col_b": {0: "as .", 1: "b", 2: "c"},
"1_col_one": {0: "aa", 1: "bb", 2: "cc"},
"col_1": {0: 1, 1: 2, 2: 33},
"renamed_col": {0: "foo", 1: "bar", 2: "fizz"},
}
DROP_COL_DF = {
"col_a": [1, 2, 32],
"col b": ["as . ", "b", " c"],
"1. col_one": ["aa", "bb", "cc"],
"": ["q", "q", "q"],
"long ass name": ["foo", "bar", "fizz"],
"to_exclude": ["garbage1", "garbage2", "garbage3"],
}
RENAMED_COLS = [
"col_a",
"col b",
"1. ??col_one",
"",
"col_1",
"renamed_col",
"col_with_empty_string",
]
EXCLUDED_DF_COLS = ["col_a", "col b", "1. col_one", "", "long ass name"]
EMPTY_HEADER_COLUMNS_DF = {
"col_ a ": [1, 2, 32],
" ": ["as . ", "b", " c"],
"1. col_one": ["aa", "bb", "cc"],
"": ["q", "q", "q"],
" col_1": [1, 2, 33],
}
NON_EMPTY_HEADER = {
"col_a": [1, 2, 32],
"col b": ["as . ", "b", " c"],
"1. col_one": ["aa", "bb", "cc"],
"col_1": [1, 2, 33],
"long ass name": ["foo", "bar", "fizz"],
"col_with_empty_string": ["1", "", "2"],
}
def generate_test_df(df):
test_df = pandas.DataFrame.from_dict(df)
return test_df
| [
"pandas.Timestamp",
"pandas.DataFrame.from_dict"
] | [((3965, 3995), 'pandas.DataFrame.from_dict', 'pandas.DataFrame.from_dict', (['df'], {}), '(df)\n', (3991, 3995), False, 'import pandas\n'), ((2113, 2145), 'pandas.Timestamp', 'Timestamp', (['"""2019-01-01 00:00:00"""'], {}), "('2019-01-01 00:00:00')\n", (2122, 2145), False, 'from pandas import Timestamp\n'), ((2158, 2190), 'pandas.Timestamp', 'Timestamp', (['"""2019-01-02 00:00:00"""'], {}), "('2019-01-02 00:00:00')\n", (2167, 2190), False, 'from pandas import Timestamp\n'), ((2203, 2235), 'pandas.Timestamp', 'Timestamp', (['"""2019-01-03 00:00:00"""'], {}), "('2019-01-03 00:00:00')\n", (2212, 2235), False, 'from pandas import Timestamp\n')] |
#!/usr/local/bin/python
'''
Pipeline for converting CSV nsde data to JSON and importing into Elasticsearch.
'''
import glob
import os
from os.path import join, dirname
import luigi
from openfda import common, config, parallel, index_util
from openfda.common import newest_file_timestamp
NSDE_DOWNLOAD = \
'https://download.open.fda.gov/Comprehensive_NDC_SPL_Data_Elements_File.zip'
NSDE_EXTRACT_DB = 'nsde/nsde.db'
NSDE_RAW_DIR = config.data_dir('nsde/raw')
class DownloadNSDE(luigi.Task):
def output(self):
return luigi.LocalTarget(join(NSDE_RAW_DIR, 'nsde.csv'))
def run(self):
output_dir = dirname(self.output().path)
zip_filename = join(output_dir, 'nsde.zip')
common.download(NSDE_DOWNLOAD, zip_filename)
os.system('unzip -o %(zip_filename)s -d %(output_dir)s' % locals())
os.rename(glob.glob(join(output_dir, '*.csv'))[0], self.output().path)
class NSDE2JSONMapper(parallel.Mapper):
rename_map = {
"Item Code": "package_ndc",
"NDC11": "package_ndc11",
"Marketing Category": "marketing_category",
"Marketing Start Date": "marketing_start_date",
"Marketing End Date": "marketing_end_date",
"Billing Unit": "billing_unit",
"Proprietary Name": "proprietary_name",
"Dosage Form": "dosage_form",
"Application Number or Citation": "application_number_or_citation",
"Product Type": "product_type",
"Inactivation Date": "inactivation_date",
"Reactivation Date": "reactivation_date"
}
def map(self, key, value, output):
def _cleaner(k, v):
''' Helper function to rename keys and purge any keys that are not in
the map.
'''
if k in self.rename_map and v is not None and v != '':
if "Date" in k:
return (self.rename_map[k], str(int(v)))
if "Proprietary Name" in k:
return (self.rename_map[k], str(v).title())
else:
return (self.rename_map[k], v)
new_value = common.transform_dict(value, _cleaner)
output.add(key, new_value)
class NSDE2JSON(luigi.Task):
def requires(self):
return DownloadNSDE()
def output(self):
return luigi.LocalTarget(config.data_dir(NSDE_EXTRACT_DB))
def run(self):
parallel.mapreduce(
parallel.Collection.from_glob(
self.input().path, parallel.CSVDictLineInput()),
mapper=NSDE2JSONMapper(),
reducer=parallel.IdentityReducer(),
output_prefix=self.output().path)
class LoadJSON(index_util.LoadJSONBase):
index_name = 'othernsde'
type_name = 'othernsde'
mapping_file = './schemas/othernsde_mapping.json'
data_source = NSDE2JSON()
use_checksum = False
optimize_index = True
last_update_date = lambda _: newest_file_timestamp(NSDE_RAW_DIR)
if __name__ == '__main__':
luigi.run()
| [
"luigi.run",
"openfda.common.transform_dict",
"openfda.parallel.IdentityReducer",
"openfda.common.newest_file_timestamp",
"os.path.join",
"openfda.common.download",
"openfda.parallel.CSVDictLineInput",
"openfda.config.data_dir"
] | [((437, 464), 'openfda.config.data_dir', 'config.data_dir', (['"""nsde/raw"""'], {}), "('nsde/raw')\n", (452, 464), False, 'from openfda import common, config, parallel, index_util\n'), ((2753, 2764), 'luigi.run', 'luigi.run', ([], {}), '()\n', (2762, 2764), False, 'import luigi\n'), ((662, 690), 'os.path.join', 'join', (['output_dir', '"""nsde.zip"""'], {}), "(output_dir, 'nsde.zip')\n", (666, 690), False, 'from os.path import join, dirname\n'), ((695, 739), 'openfda.common.download', 'common.download', (['NSDE_DOWNLOAD', 'zip_filename'], {}), '(NSDE_DOWNLOAD, zip_filename)\n', (710, 739), False, 'from openfda import common, config, parallel, index_util\n'), ((1939, 1977), 'openfda.common.transform_dict', 'common.transform_dict', (['value', '_cleaner'], {}), '(value, _cleaner)\n', (1960, 1977), False, 'from openfda import common, config, parallel, index_util\n'), ((2686, 2721), 'openfda.common.newest_file_timestamp', 'newest_file_timestamp', (['NSDE_RAW_DIR'], {}), '(NSDE_RAW_DIR)\n', (2707, 2721), False, 'from openfda.common import newest_file_timestamp\n'), ((548, 578), 'os.path.join', 'join', (['NSDE_RAW_DIR', '"""nsde.csv"""'], {}), "(NSDE_RAW_DIR, 'nsde.csv')\n", (552, 578), False, 'from os.path import join, dirname\n'), ((2138, 2170), 'openfda.config.data_dir', 'config.data_dir', (['NSDE_EXTRACT_DB'], {}), '(NSDE_EXTRACT_DB)\n', (2153, 2170), False, 'from openfda import common, config, parallel, index_util\n'), ((2282, 2309), 'openfda.parallel.CSVDictLineInput', 'parallel.CSVDictLineInput', ([], {}), '()\n', (2307, 2309), False, 'from openfda import common, config, parallel, index_util\n'), ((2362, 2388), 'openfda.parallel.IdentityReducer', 'parallel.IdentityReducer', ([], {}), '()\n', (2386, 2388), False, 'from openfda import common, config, parallel, index_util\n'), ((836, 861), 'os.path.join', 'join', (['output_dir', '"""*.csv"""'], {}), "(output_dir, '*.csv')\n", (840, 861), False, 'from os.path import join, dirname\n')] |
# Generated by Django 2.0.5 on 2019-06-10 00:03
from django.db import migrations, models
import localflavor.mx.models
class Migration(migrations.Migration):
dependencies = [
('INV', '0002_auto_20190608_2204'),
]
operations = [
migrations.AlterField(
model_name='fiscalmx',
name='contactEmail',
field=models.EmailField(default='<EMAIL>', help_text='Correo donde llegarán las notificaciones sobre facturación', max_length=100, verbose_name='Email contacto'),
preserve_default=False,
),
migrations.AlterField(
model_name='fiscalmx',
name='persona',
field=models.CharField(choices=[('FISICA', 'FISICA'), ('MORAL', 'MORAL')], default='Física', max_length=100, verbose_name='Tipo de persona'),
),
migrations.AlterField(
model_name='fiscalmx',
name='razon_social',
field=models.CharField(default='Razon Social', help_text='Razón social de persona Física o Moral', max_length=255, verbose_name='Razón social'),
preserve_default=False,
),
migrations.AlterField(
model_name='fiscalmx',
name='rfc',
field=localflavor.mx.models.MXRFCField(default='SUL010720JN8', max_length=13, verbose_name='RFC'),
preserve_default=False,
),
]
| [
"django.db.models.EmailField",
"django.db.models.CharField"
] | [((369, 533), 'django.db.models.EmailField', 'models.EmailField', ([], {'default': '"""<EMAIL>"""', 'help_text': '"""Correo donde llegarán las notificaciones sobre facturación"""', 'max_length': '(100)', 'verbose_name': '"""Email contacto"""'}), "(default='<EMAIL>', help_text=\n 'Correo donde llegarán las notificaciones sobre facturación',\n max_length=100, verbose_name='Email contacto')\n", (386, 533), False, 'from django.db import migrations, models\n'), ((685, 823), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('FISICA', 'FISICA'), ('MORAL', 'MORAL')]", 'default': '"""Física"""', 'max_length': '(100)', 'verbose_name': '"""Tipo de persona"""'}), "(choices=[('FISICA', 'FISICA'), ('MORAL', 'MORAL')],\n default='Física', max_length=100, verbose_name='Tipo de persona')\n", (701, 823), False, 'from django.db import migrations, models\n'), ((949, 1096), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""Razon Social"""', 'help_text': '"""Razón social de persona Física o Moral"""', 'max_length': '(255)', 'verbose_name': '"""Razón social"""'}), "(default='Razon Social', help_text=\n 'Razón social de persona Física o Moral', max_length=255, verbose_name=\n 'Razón social')\n", (965, 1096), False, 'from django.db import migrations, models\n')] |
"""Test functions for pem.fluid.ecl module
"""
import pytest
from pytest import approx
import numpy as np
import digirock.fluids.ecl as fluid_ecl
from inspect import getmembers, isfunction
@pytest.fixture
def tol():
return {
"rel": 0.05, # relative testing tolerance in percent
"abs": 0.00001, # absolute testing tolerance
}
@pytest.mark.parametrize(
"pres, extrap, ans",
[
(325, "const", 1.4615),
(325, "pchip", 1.4615),
(np.r_[325, 375], "const", np.r_[1.4615, 1.4505]),
(np.r_[325, 375], "pchip", np.r_[1.4615, 1.4505]),
],
)
def test_oil_fvf_table(test_data, pres, ans, extrap, tol):
tab = np.loadtxt(test_data / "PVT_BO.inc")
assert np.allclose(
fluid_ecl.oil_fvf_table(tab[:, 0], tab[:, 1], pres, extrap=extrap),
ans,
rtol=tol["rel"],
)
def test_oil_fvf_table_bad_pchi(test_data):
tab = np.loadtxt(test_data / "PVT_BO.inc")
# test bad extrap
with pytest.raises(ValueError):
assert fluid_ecl.oil_fvf_table(
tab[:, 0], tab[:, 1], 235, extrap="Unknown Extrap"
)
@pytest.mark.parametrize(
"pres, extrap, ans",
[
(325, "const", 1.4615),
(325, "pchip", 1.4615),
(np.r_[325, 375], "const", np.r_[1.4615, 1.4505]),
(np.r_[325, 375], "pchip", np.r_[1.4615, 1.4505]),
],
)
def test_oil_fvf_table(test_data, pres, ans, extrap, tol):
tab = np.loadtxt(test_data / "PVT_BO.inc")
assert np.allclose(
fluid_ecl.oil_fvf_table(tab[:, 0], tab[:, 1], pres, extrap=extrap),
ans,
rtol=tol["rel"],
)
@pytest.mark.parametrize("api,ans", ((20, 0.933993399339934), (45, 0.8016997167138812)))
def test_e100_oil_density(api, ans, tol):
assert fluid_ecl.e100_oil_density(api) == approx(ans)
assert np.allclose(
fluid_ecl.e100_oil_density(np.r_[api, api]), np.r_[ans, ans], atol=tol["abs"]
)
| [
"pytest.approx",
"digirock.fluids.ecl.oil_fvf_table",
"pytest.mark.parametrize",
"digirock.fluids.ecl.e100_oil_density",
"pytest.raises",
"numpy.loadtxt"
] | [((375, 580), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pres, extrap, ans"""', "[(325, 'const', 1.4615), (325, 'pchip', 1.4615), (np.r_[325, 375], 'const',\n np.r_[1.4615, 1.4505]), (np.r_[325, 375], 'pchip', np.r_[1.4615, 1.4505])]"], {}), "('pres, extrap, ans', [(325, 'const', 1.4615), (325,\n 'pchip', 1.4615), (np.r_[325, 375], 'const', np.r_[1.4615, 1.4505]), (\n np.r_[325, 375], 'pchip', np.r_[1.4615, 1.4505])])\n", (398, 580), False, 'import pytest\n'), ((1166, 1371), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pres, extrap, ans"""', "[(325, 'const', 1.4615), (325, 'pchip', 1.4615), (np.r_[325, 375], 'const',\n np.r_[1.4615, 1.4505]), (np.r_[325, 375], 'pchip', np.r_[1.4615, 1.4505])]"], {}), "('pres, extrap, ans', [(325, 'const', 1.4615), (325,\n 'pchip', 1.4615), (np.r_[325, 375], 'const', np.r_[1.4615, 1.4505]), (\n np.r_[325, 375], 'pchip', np.r_[1.4615, 1.4505])])\n", (1189, 1371), False, 'import pytest\n'), ((1684, 1776), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""api,ans"""', '((20, 0.933993399339934), (45, 0.8016997167138812))'], {}), "('api,ans', ((20, 0.933993399339934), (45, \n 0.8016997167138812)))\n", (1707, 1776), False, 'import pytest\n'), ((701, 737), 'numpy.loadtxt', 'np.loadtxt', (["(test_data / 'PVT_BO.inc')"], {}), "(test_data / 'PVT_BO.inc')\n", (711, 737), True, 'import numpy as np\n'), ((947, 983), 'numpy.loadtxt', 'np.loadtxt', (["(test_data / 'PVT_BO.inc')"], {}), "(test_data / 'PVT_BO.inc')\n", (957, 983), True, 'import numpy as np\n'), ((1492, 1528), 'numpy.loadtxt', 'np.loadtxt', (["(test_data / 'PVT_BO.inc')"], {}), "(test_data / 'PVT_BO.inc')\n", (1502, 1528), True, 'import numpy as np\n'), ((772, 838), 'digirock.fluids.ecl.oil_fvf_table', 'fluid_ecl.oil_fvf_table', (['tab[:, 0]', 'tab[:, 1]', 'pres'], {'extrap': 'extrap'}), '(tab[:, 0], tab[:, 1], pres, extrap=extrap)\n', (795, 838), True, 'import digirock.fluids.ecl as fluid_ecl\n'), ((1017, 1042), 'pytest.raises', 
'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1030, 1042), False, 'import pytest\n'), ((1060, 1135), 'digirock.fluids.ecl.oil_fvf_table', 'fluid_ecl.oil_fvf_table', (['tab[:, 0]', 'tab[:, 1]', '(235)'], {'extrap': '"""Unknown Extrap"""'}), "(tab[:, 0], tab[:, 1], 235, extrap='Unknown Extrap')\n", (1083, 1135), True, 'import digirock.fluids.ecl as fluid_ecl\n'), ((1563, 1629), 'digirock.fluids.ecl.oil_fvf_table', 'fluid_ecl.oil_fvf_table', (['tab[:, 0]', 'tab[:, 1]', 'pres'], {'extrap': 'extrap'}), '(tab[:, 0], tab[:, 1], pres, extrap=extrap)\n', (1586, 1629), True, 'import digirock.fluids.ecl as fluid_ecl\n'), ((1827, 1858), 'digirock.fluids.ecl.e100_oil_density', 'fluid_ecl.e100_oil_density', (['api'], {}), '(api)\n', (1853, 1858), True, 'import digirock.fluids.ecl as fluid_ecl\n'), ((1862, 1873), 'pytest.approx', 'approx', (['ans'], {}), '(ans)\n', (1868, 1873), False, 'from pytest import approx\n'), ((1908, 1951), 'digirock.fluids.ecl.e100_oil_density', 'fluid_ecl.e100_oil_density', (['np.r_[api, api]'], {}), '(np.r_[api, api])\n', (1934, 1951), True, 'import digirock.fluids.ecl as fluid_ecl\n')] |
'''
Created on: see version log.
@author: rigonz
coding: utf-8
IMPORTANT: requires py3.6 (rasterio)
Script that:
1) reads a series of raster files,
2) runs some checks,
3) makes charts showing the results.
The input data corresponds to a region of the world (ESP) and represents
the population density (pop/km2).
Each file has from a data provider, or different calculation conditions.
The checks consist in verifying that the input files refer to the same region
and to some intercomparison indicators.
The charts show the correlation among the different input data, as tuples
associated to the same geographical location.
Version log.
R0 (20210512):
First trials, seems to work well.
'''
# %% Imports.
import rasterio # IMPORTANT: requires py3.6
import numpy as np
from matplotlib import pyplot as plt
# %% Directories.
RootDirIn = 'D:/0 DOWN/zz EXTSave/GIS/POP/EUR/SHP/'
# Filenames:
FileNameI1 = RootDirIn + 'WP/ESP_clip_pd_2020_1km_UNadj.tif'
FileNameI2 = RootDirIn + 'WP/ESP_clip_ppp_2020_1km_Aggregated_UNadj_d.tif'
FileNameI3 = RootDirIn + 'GPW/ESP_clip gpw_v4_population_density_rev11_2020_30_sec.tif'
FileNameI4 = RootDirIn + 'GPW/ESP_clip gpw_v4_population_density_adjusted_to_2015_unwpp_country_totals_rev11_2020_30_sec.tif'
# %% Read data.
# Open files:
print('Opening and reading the files...')
ds1 = rasterio.open(FileNameI1)
ds2 = rasterio.open(FileNameI2)
ds3 = rasterio.open(FileNameI3)
ds4 = rasterio.open(FileNameI4)
# Read data:
band1 = ds1.read(1)
band2 = ds2.read(1)
band3 = ds3.read(1)
band4 = ds4.read(1)
# %% Check the datasets.
print('Checking the data...')
# Bounds:
if not(ds1.bounds == ds2.bounds and ds2.bounds == ds3.bounds and
ds3.bounds == ds4.bounds):
print('WARNING: bounds are not the same:')
print(ds1.bounds)
print(ds2.bounds)
print(ds3.bounds)
print(ds4.bounds)
# Width and height:
if not(ds1.width == ds2.width and ds2.width == ds3.width and
ds3.width == ds4.width):
print('WARNING: widths are not the same:')
print(ds1.width)
print(ds2.width)
print(ds3.width)
print(ds4.width)
if not(ds1.height == ds2.height and ds2.height == ds3.height and
ds3.height == ds4.height):
print('WARNING: heights are not the same:')
print(ds1.height)
print(ds2.height)
print(ds3.height)
print(ds4.height)
# Bands:
if not(ds1.indexes[0] == ds2.indexes[0] and ds2.indexes[0] == ds3.indexes[0]
and ds3.indexes[0] == ds4.indexes[0]):
print('WARNING: bands are not the same:')
print(ds1.indexes[0])
print(ds2.indexes[0])
print(ds3.indexes[0])
print(ds4.indexes[0])
# Dimensions:
if not(ds1.shape == ds2.shape and ds2.shape == ds3.shape and
ds3.shape == ds4.shape):
print('WARNING: shapes are not the same:')
print(ds1.shape)
print(ds2.shape)
print(ds3.shape)
print(ds4.shape)
# CRS:
try:
if (ds1.crs.data['init'] != 'epsg:4326' or
ds2.crs.data['init'] != 'epsg:4326' or
ds3.crs.data['init'] != 'epsg:4326' or
ds4.crs.data['init'] != 'epsg:4326'):
print('WARNING: CRS is not EPSG:4326.')
except:
print('WARNING: CRS is not available or is not EPSG:4326:')
# %% Create new bands.
print('Checking the new bands...')
# Remain within the boundaries of data:
left = max(ds1.bounds.left, ds2.bounds.left, ds3.bounds.left, ds4.bounds.left)
top = min(ds1.bounds.top, ds2.bounds.top, ds3.bounds.top, ds4.bounds.top)
right = min(ds1.bounds.right, ds2.bounds.right, ds3.bounds.right, ds4.bounds.right)
bottom = max(ds1.bounds.bottom, ds2.bounds.bottom, ds3.bounds.bottom, ds4.bounds.bottom)
res = 1 / 120. # 30 arc-sec, approx 100 m; should be min() etc.
height = int(np.ceil((top - bottom) / res + 1))
width = int(np.ceil((right - left) / res + 1))
res_x = (right - left) / (width - 1)
res_y = (top - bottom) / (height - 1)
# Check (valid for east + north hemispheres only!):
if right > min(ds1.bounds.right, ds2.bounds.right, ds3.bounds.right, ds4.bounds.right):
print('WARNING: right boundary exceeded.')
if bottom > max(ds1.bounds.bottom, ds2.bounds.bottom, ds3.bounds.bottom, ds4.bounds.bottom):
print('WARNING: bottom boundary exceeded.')
# Create new bands:
print('Creating the new bands...')
b1 = np.full((height, width), 0.)
b2 = np.full((height, width), 0.)
b3 = np.full((height, width), 0.)
b4 = np.full((height, width), 0.)
# Populate the new bands:
count = 0
for i in range(0, height-1, 1):
for j in range(0, width-1, 1):
x, y = (left + j * res_x, top - i * res_y)
row, col = ds1.index(x, y)
b1[i, j] = band1[row, col]
row, col = ds2.index(x, y)
b2[i, j] = band2[row, col]
row, col = ds3.index(x, y)
b3[i, j] = band3[row, col]
row, col = ds4.index(x, y)
b4[i, j] = band4[row, col]
# Show the progress:
if count % height % 50 == 0:
print('Progress... {:4.1f}%'.format(count/height*100))
count += 1
# %% Flatten and clear nodata.
print('Preparing the new bands...')
b1f = b1.flatten()
b2f = b2.flatten()
b3f = b3.flatten()
b4f = b4.flatten()
# Remove only nodata, retain 0s:
b_mask = np.array(np.array([b1f, b2f, b3f, b4f]).min(axis=0) < 0)
b1fm = np.delete(b1f, b_mask)
b2fm = np.delete(b2f, b_mask)
b3fm = np.delete(b3f, b_mask)
b4fm = np.delete(b4f, b_mask)
# %% Compute correlations.
print('Pearson coeff. after removing the no-data:')
print('DS1-2 = {:4.3f}.'.format(np.corrcoef(b1fm, b2fm)[0, 1]))
print('DS1-3 = {:4.3f}.'.format(np.corrcoef(b1fm, b3fm)[0, 1]))
print('DS1-4 = {:4.3f}.'.format(np.corrcoef(b1fm, b4fm)[0, 1]))
print('DS2-3 = {:4.3f}.'.format(np.corrcoef(b2fm, b3fm)[0, 1]))
print('DS2-4 = {:4.3f}.'.format(np.corrcoef(b2fm, b4fm)[0, 1]))
print('DS3-4 = {:4.3f}.'.format(np.corrcoef(b3fm, b4fm)[0, 1]))
# %% Draw histograms.
# Auxiliaries:
color = ['k', 'r', 'b', 'g']
label = ['DS1', 'DS2', 'DS3', 'DS4']
# Plot:
plt.hist([b1fm, b2fm, b3fm, b4fm], bins=20, color=color[0:4], label=label)
# Etc:
plt.title('DS=>0', loc='right')
plt.xlabel('pop. density, hab/km2')
plt.ylabel('count')
plt.grid(True)
plt.legend()
plt.show()
# Zoom at the right tail:
# Plot:
plt.hist([b1fm, b2fm, b3fm, b4fm], bins=20, color=color[0:4], label=label)
# Etc:
plt.title('DS>=0', loc='right')
plt.xlabel('pop. density, hab/km2')
plt.ylabel('count')
plt.grid(True)
plt.legend()
#•plt.xlim(1500, 40000)
plt.ylim(0, 7500)
plt.show()
# %% Draw chart.
# Auxiliaries:
color = ['k', 'r', 'b', 'g']
# Plot:
plt.figure(1, figsize=(4, 4), dpi=300)
# plt.scatter(b1fm, b3fm, color=color[0], s=1.0, label='1-3', alpha=0.1)
# plt.scatter(b1fm, b4fm, color=color[1], s=1.0, label='1-4', alpha=0.1)
plt.scatter(b2fm, b3fm, color=color[2], s=1.0, label='2-3', alpha=0.1)
# Titles:
plt.title('PD>=0', loc='right')
plt.xlabel('pop. density, hab/km2')
plt.ylabel('pop. density, hab/km2')
# Etc:
plt.grid(True)
plt.legend()
plt.tight_layout()
# Take a look:
plt.show()
# %% Draw heatmap.
# Remove 0s:
b_mask = np.array(np.array([b1f, b2f, b3f, b4f]).min(axis=0) <= 0)
b1fm = np.delete(b1f, b_mask)
b2fm = np.delete(b2f, b_mask)
b3fm = np.delete(b3f, b_mask)
b4fm = np.delete(b4f, b_mask)
# Plot:
plt.hist2d(np.log10(b2fm), np.log10(b3fm), bins=100, cmap='binary')
# Colorbar:
cb = plt.colorbar()
cb.set_label('Number of entries')
# Etc:
plt.title('PD>0', loc='right')
plt.xlabel('log10_DS2 pop. density, hab/km2')
plt.ylabel('log10_DS3 pop. density, hab/km2')
plt.tight_layout()
plt.show()
# %% Script done.
print('\nScript completed. Thanks!')
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.hist",
"numpy.log10",
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.delete",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"numpy.ceil",
"numpy.corrcoef",
"rasterio.open",
"matplotlib.pyplot.title",
"matp... | [((1327, 1352), 'rasterio.open', 'rasterio.open', (['FileNameI1'], {}), '(FileNameI1)\n', (1340, 1352), False, 'import rasterio\n'), ((1359, 1384), 'rasterio.open', 'rasterio.open', (['FileNameI2'], {}), '(FileNameI2)\n', (1372, 1384), False, 'import rasterio\n'), ((1391, 1416), 'rasterio.open', 'rasterio.open', (['FileNameI3'], {}), '(FileNameI3)\n', (1404, 1416), False, 'import rasterio\n'), ((1423, 1448), 'rasterio.open', 'rasterio.open', (['FileNameI4'], {}), '(FileNameI4)\n', (1436, 1448), False, 'import rasterio\n'), ((4218, 4247), 'numpy.full', 'np.full', (['(height, width)', '(0.0)'], {}), '((height, width), 0.0)\n', (4225, 4247), True, 'import numpy as np\n'), ((4252, 4281), 'numpy.full', 'np.full', (['(height, width)', '(0.0)'], {}), '((height, width), 0.0)\n', (4259, 4281), True, 'import numpy as np\n'), ((4286, 4315), 'numpy.full', 'np.full', (['(height, width)', '(0.0)'], {}), '((height, width), 0.0)\n', (4293, 4315), True, 'import numpy as np\n'), ((4320, 4349), 'numpy.full', 'np.full', (['(height, width)', '(0.0)'], {}), '((height, width), 0.0)\n', (4327, 4349), True, 'import numpy as np\n'), ((5172, 5194), 'numpy.delete', 'np.delete', (['b1f', 'b_mask'], {}), '(b1f, b_mask)\n', (5181, 5194), True, 'import numpy as np\n'), ((5202, 5224), 'numpy.delete', 'np.delete', (['b2f', 'b_mask'], {}), '(b2f, b_mask)\n', (5211, 5224), True, 'import numpy as np\n'), ((5232, 5254), 'numpy.delete', 'np.delete', (['b3f', 'b_mask'], {}), '(b3f, b_mask)\n', (5241, 5254), True, 'import numpy as np\n'), ((5262, 5284), 'numpy.delete', 'np.delete', (['b4f', 'b_mask'], {}), '(b4f, b_mask)\n', (5271, 5284), True, 'import numpy as np\n'), ((5862, 5936), 'matplotlib.pyplot.hist', 'plt.hist', (['[b1fm, b2fm, b3fm, b4fm]'], {'bins': '(20)', 'color': 'color[0:4]', 'label': 'label'}), '([b1fm, b2fm, b3fm, b4fm], bins=20, color=color[0:4], label=label)\n', (5870, 5936), True, 'from matplotlib import pyplot as plt\n'), ((5945, 5976), 'matplotlib.pyplot.title', 
'plt.title', (['"""DS=>0"""'], {'loc': '"""right"""'}), "('DS=>0', loc='right')\n", (5954, 5976), True, 'from matplotlib import pyplot as plt\n'), ((5977, 6012), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""pop. density, hab/km2"""'], {}), "('pop. density, hab/km2')\n", (5987, 6012), True, 'from matplotlib import pyplot as plt\n'), ((6013, 6032), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""count"""'], {}), "('count')\n", (6023, 6032), True, 'from matplotlib import pyplot as plt\n'), ((6033, 6047), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6041, 6047), True, 'from matplotlib import pyplot as plt\n'), ((6048, 6060), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6058, 6060), True, 'from matplotlib import pyplot as plt\n'), ((6061, 6071), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6069, 6071), True, 'from matplotlib import pyplot as plt\n'), ((6107, 6181), 'matplotlib.pyplot.hist', 'plt.hist', (['[b1fm, b2fm, b3fm, b4fm]'], {'bins': '(20)', 'color': 'color[0:4]', 'label': 'label'}), '([b1fm, b2fm, b3fm, b4fm], bins=20, color=color[0:4], label=label)\n', (6115, 6181), True, 'from matplotlib import pyplot as plt\n'), ((6190, 6221), 'matplotlib.pyplot.title', 'plt.title', (['"""DS>=0"""'], {'loc': '"""right"""'}), "('DS>=0', loc='right')\n", (6199, 6221), True, 'from matplotlib import pyplot as plt\n'), ((6222, 6257), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""pop. density, hab/km2"""'], {}), "('pop. 
density, hab/km2')\n", (6232, 6257), True, 'from matplotlib import pyplot as plt\n'), ((6258, 6277), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""count"""'], {}), "('count')\n", (6268, 6277), True, 'from matplotlib import pyplot as plt\n'), ((6278, 6292), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6286, 6292), True, 'from matplotlib import pyplot as plt\n'), ((6293, 6305), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6303, 6305), True, 'from matplotlib import pyplot as plt\n'), ((6330, 6347), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(7500)'], {}), '(0, 7500)\n', (6338, 6347), True, 'from matplotlib import pyplot as plt\n'), ((6348, 6358), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6356, 6358), True, 'from matplotlib import pyplot as plt\n'), ((6431, 6469), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(4, 4)', 'dpi': '(300)'}), '(1, figsize=(4, 4), dpi=300)\n', (6441, 6469), True, 'from matplotlib import pyplot as plt\n'), ((6616, 6686), 'matplotlib.pyplot.scatter', 'plt.scatter', (['b2fm', 'b3fm'], {'color': 'color[2]', 's': '(1.0)', 'label': '"""2-3"""', 'alpha': '(0.1)'}), "(b2fm, b3fm, color=color[2], s=1.0, label='2-3', alpha=0.1)\n", (6627, 6686), True, 'from matplotlib import pyplot as plt\n'), ((6699, 6730), 'matplotlib.pyplot.title', 'plt.title', (['"""PD>=0"""'], {'loc': '"""right"""'}), "('PD>=0', loc='right')\n", (6708, 6730), True, 'from matplotlib import pyplot as plt\n'), ((6731, 6766), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""pop. density, hab/km2"""'], {}), "('pop. density, hab/km2')\n", (6741, 6766), True, 'from matplotlib import pyplot as plt\n'), ((6767, 6802), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""pop. density, hab/km2"""'], {}), "('pop. 
density, hab/km2')\n", (6777, 6802), True, 'from matplotlib import pyplot as plt\n'), ((6811, 6825), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6819, 6825), True, 'from matplotlib import pyplot as plt\n'), ((6826, 6838), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6836, 6838), True, 'from matplotlib import pyplot as plt\n'), ((6839, 6857), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6855, 6857), True, 'from matplotlib import pyplot as plt\n'), ((6874, 6884), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6882, 6884), True, 'from matplotlib import pyplot as plt\n'), ((6992, 7014), 'numpy.delete', 'np.delete', (['b1f', 'b_mask'], {}), '(b1f, b_mask)\n', (7001, 7014), True, 'import numpy as np\n'), ((7022, 7044), 'numpy.delete', 'np.delete', (['b2f', 'b_mask'], {}), '(b2f, b_mask)\n', (7031, 7044), True, 'import numpy as np\n'), ((7052, 7074), 'numpy.delete', 'np.delete', (['b3f', 'b_mask'], {}), '(b3f, b_mask)\n', (7061, 7074), True, 'import numpy as np\n'), ((7082, 7104), 'numpy.delete', 'np.delete', (['b4f', 'b_mask'], {}), '(b4f, b_mask)\n', (7091, 7104), True, 'import numpy as np\n'), ((7200, 7214), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (7212, 7214), True, 'from matplotlib import pyplot as plt\n'), ((7257, 7287), 'matplotlib.pyplot.title', 'plt.title', (['"""PD>0"""'], {'loc': '"""right"""'}), "('PD>0', loc='right')\n", (7266, 7287), True, 'from matplotlib import pyplot as plt\n'), ((7288, 7333), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""log10_DS2 pop. density, hab/km2"""'], {}), "('log10_DS2 pop. density, hab/km2')\n", (7298, 7333), True, 'from matplotlib import pyplot as plt\n'), ((7334, 7379), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log10_DS3 pop. density, hab/km2"""'], {}), "('log10_DS3 pop. 
density, hab/km2')\n", (7344, 7379), True, 'from matplotlib import pyplot as plt\n'), ((7380, 7398), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7396, 7398), True, 'from matplotlib import pyplot as plt\n'), ((7399, 7409), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7407, 7409), True, 'from matplotlib import pyplot as plt\n'), ((3670, 3703), 'numpy.ceil', 'np.ceil', (['((top - bottom) / res + 1)'], {}), '((top - bottom) / res + 1)\n', (3677, 3703), True, 'import numpy as np\n'), ((3717, 3750), 'numpy.ceil', 'np.ceil', (['((right - left) / res + 1)'], {}), '((right - left) / res + 1)\n', (3724, 3750), True, 'import numpy as np\n'), ((7125, 7139), 'numpy.log10', 'np.log10', (['b2fm'], {}), '(b2fm)\n', (7133, 7139), True, 'import numpy as np\n'), ((7141, 7155), 'numpy.log10', 'np.log10', (['b3fm'], {}), '(b3fm)\n', (7149, 7155), True, 'import numpy as np\n'), ((5397, 5420), 'numpy.corrcoef', 'np.corrcoef', (['b1fm', 'b2fm'], {}), '(b1fm, b2fm)\n', (5408, 5420), True, 'import numpy as np\n'), ((5461, 5484), 'numpy.corrcoef', 'np.corrcoef', (['b1fm', 'b3fm'], {}), '(b1fm, b3fm)\n', (5472, 5484), True, 'import numpy as np\n'), ((5525, 5548), 'numpy.corrcoef', 'np.corrcoef', (['b1fm', 'b4fm'], {}), '(b1fm, b4fm)\n', (5536, 5548), True, 'import numpy as np\n'), ((5589, 5612), 'numpy.corrcoef', 'np.corrcoef', (['b2fm', 'b3fm'], {}), '(b2fm, b3fm)\n', (5600, 5612), True, 'import numpy as np\n'), ((5653, 5676), 'numpy.corrcoef', 'np.corrcoef', (['b2fm', 'b4fm'], {}), '(b2fm, b4fm)\n', (5664, 5676), True, 'import numpy as np\n'), ((5717, 5740), 'numpy.corrcoef', 'np.corrcoef', (['b3fm', 'b4fm'], {}), '(b3fm, b4fm)\n', (5728, 5740), True, 'import numpy as np\n'), ((5117, 5147), 'numpy.array', 'np.array', (['[b1f, b2f, b3f, b4f]'], {}), '([b1f, b2f, b3f, b4f])\n', (5125, 5147), True, 'import numpy as np\n'), ((6936, 6966), 'numpy.array', 'np.array', (['[b1f, b2f, b3f, b4f]'], {}), '([b1f, b2f, b3f, b4f])\n', (6944, 6966), True, 'import 
numpy as np\n')] |
"""
Hello World Component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/developers/development_101/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
# Module-level logger, namespaced to this component's module path.
_LOGGER = logging.getLogger(__name__)
# The domain of your component. Equal to the filename of your component.
DOMAIN = "hello_world"
# Other components that must be set up before this one (none required here).
DEPENDENCIES = []
# Configuration key (under the DOMAIN section) holding the text to display.
CONF_TEXT = 'text'
# Fallback value used when the configuration omits CONF_TEXT.
DEFAULT_TEXT = 'No text!'
# Validation schema: requires a string `text` entry under the `hello_world:`
# section; `extra=vol.ALLOW_EXTRA` permits unrelated top-level keys so the
# rest of the Home Assistant configuration passes through untouched.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_TEXT): cv.string,
    })
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Set up the hello_world component.

    Reads the configured text from the component's section of *config*
    (falling back to DEFAULT_TEXT when absent) and publishes it as a state.

    Returns True to signal successful initialization to Home Assistant.
    """
    # Pull this component's own section out of the full configuration.
    component_conf = config[DOMAIN]
    message = component_conf.get(CONF_TEXT, DEFAULT_TEXT)
    # Publish the value; state ids follow the DOMAIN.OBJECT_ID format.
    hass.states.set('hello_world.Hello_State', message)
    # A truthy return tells Home Assistant the component loaded correctly.
    return True
"logging.getLogger",
"voluptuous.Required"
] | [((290, 317), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (307, 317), False, 'import logging\n'), ((568, 591), 'voluptuous.Required', 'vol.Required', (['CONF_TEXT'], {}), '(CONF_TEXT)\n', (580, 591), True, 'import voluptuous as vol\n')] |