id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
4946701 | <reponame>GSByeon/openhgsenti
# -*- coding: utf-8 -*-
# Generated by Django 1.11b1 on 2017-04-02 09:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.11b1, 2017-04-02).

    Re-points the ``answered`` FK on model 'in' and the ``handler`` FK on
    model 'out' at ``quots.Outer`` with CASCADE deletion.
    """

    dependencies = [
        # Must be applied after the previous auto-generated migration.
        ('quots', '0004_auto_20170402_1832'),
    ]

    operations = [
        migrations.AlterField(
            model_name='in',
            name='answered',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='quots.Outer'),
        ),
        migrations.AlterField(
            model_name='out',
            name='handler',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quots.Outer'),
        ),
    ]
| StarcoderdataPython |
8123546 | <filename>team_9/cocos/cocos/actions/tiledgrid_actions.py
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 <NAME>, <NAME>, <NAME>,
# <NAME>
# Copyright (c) 2009-2019 <NAME>, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Implementation of TiledGrid3DAction actions
"""
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
import random
from cocos.euclid import *
from .basegrid_actions import *
from cocos.director import director
# Short alias: the jitter effects below call randrange very frequently.
rr = random.randrange

__all__ = ['FadeOutTRTiles',   # actions that don't modify the z coordinate
           'FadeOutBLTiles',
           'FadeOutUpTiles',
           'FadeOutDownTiles',
           'ShuffleTiles',
           'TurnOffTiles',
           'SplitRows',
           'SplitCols',
           'ShakyTiles3D',     # actions that modify the z coordinate
           'ShatteredTiles3D',
           'WavesTiles3D',
           'JumpTiles3D', ]
# Don't export this class
# Don't export this class
class Tile(object):
    """Bookkeeping for a single tile of a tiled grid action.

    Records the tile's starting grid position, its current position, and
    the total displacement (``delta``) it must travel over the action.
    """

    def __init__(self, position=(0, 0), start_position=(0, 0), delta=(0, 0)):
        super(Tile, self).__init__()
        # Current, initial and total-displacement coordinates.
        self.position = position
        self.start_position = start_position
        self.delta = delta

    def __repr__(self):
        fields = (self.start_position, self.position, self.delta)
        return "(start_pos: %s pos: %s delta:%s)" % fields
class ShakyTiles3D(TiledGrid3DAction):
    """Simulates a shaky floor composed of tiles.

    Example::

        scene.do(ShakyTiles3D(randrange=6, grid=(4,4), duration=10))
    """

    def init(self, randrange=6, *args, **kw):
        """
        :Parameters:
            `randrange` : int
                Number used in random.randrange(-randrange, randrange) to build the effect
        """
        super(ShakyTiles3D, self).init(*args, **kw)
        self.randrange = randrange

    def update(self, t):
        # Re-jitter every vertex of every tile each frame: a fresh offset
        # from [-randrange, randrange] is added on each of the three axes.
        spread = self.randrange
        for col in range(self.grid.x):
            for row in range(self.grid.y):
                coords = self.get_original_tile(col, row)
                # coords is a flat [x, y, z, x, y, z, ...] vertex list.
                for base in range(0, len(coords), 3):
                    for axis in range(3):
                        coords[base + axis] += rr(-spread, spread + 1)
                self.set_tile(col, row, coords)
class ShatteredTiles3D(TiledGrid3DAction):
    """Shatters the tiles with random offsets computed exactly once.

    Like a single frame of `ShakyTiles3D`: the random displacement is
    applied on the first update and the grid then stays frozen in that
    state for the rest of the duration.

    Example::

        scene.do(ShatteredTiles3D(randrange=12))
    """

    def init(self, randrange=6, *args, **kw):
        """
        :Parameters:
            `randrange` : int
                Number used in random.randrange(-randrange, randrange) to build the effect
        """
        super(ShatteredTiles3D, self).init(*args, **kw)
        self.randrange = randrange
        # Becomes True once the one-shot shatter has been applied.
        self._once = False

    def update(self, t):
        if self._once:
            return
        spread = self.randrange
        for col in range(self.grid.x):
            for row in range(self.grid.y):
                coords = self.get_original_tile(col, row)
                # coords is a flat [x, y, z, ...] vertex list.
                for base in range(0, len(coords), 3):
                    for axis in range(3):
                        coords[base + axis] += rr(-spread, spread + 1)
                self.set_tile(col, row, coords)
        self._once = True
class ShuffleTiles(TiledGrid3DAction):
    """ShuffleTiles moves the tiles randomly across the screen.

    To put them back use: Reverse(ShuffleTiles()) with the same seed parameter.

    Example::

        scene.do(ShuffleTiles(grid=(4,4), seed=1, duration=10))
    """

    def init(self, seed=-1, *args, **kw):
        """
        :Parameters:
            `seed` : float
                Seed for the random in the shuffle.
        """
        super(ShuffleTiles, self).init(*args, **kw)
        self.seed = seed

    def start(self):
        super(ShuffleTiles, self).start()
        # Maps (column, row) -> Tile bookkeeping object.
        self.tiles = {}
        self._once = False

        # Seeding only when explicitly requested keeps the default run
        # non-deterministic while allowing reproducible (reversible) runs.
        if self.seed != -1:
            random.seed(self.seed)

        # random positions: tiles_order[src_index] is the destination index.
        self.nr_of_tiles = self.grid.x * self.grid.y
        self.tiles_order = list(range(self.nr_of_tiles))
        random.shuffle(self.tiles_order)

        for i in range(self.grid.x):
            for j in range(self.grid.y):
                self.tiles[(i, j)] = Tile(position=Point2(i, j),
                                          start_position=Point2(i, j),
                                          delta=self._get_delta(i, j))

    def place_tile(self, i, j):
        # Shift every vertex of tile (i, j) by the tile's current offset,
        # converted from grid cells to pixels via the grid step sizes.
        t = self.tiles[(i, j)]
        coords = self.get_original_tile(i, j)
        for k in range(0, len(coords), 3):
            coords[k] += int(t.position.x * self.target.grid.x_step)
            coords[k + 1] += int(t.position.y * self.target.grid.y_step)
        self.set_tile(i, j, coords)

    def update(self, t):
        # Interpolate each tile's displacement: at t=0 no offset, at t=1
        # the full shuffled displacement (delta).
        for i in range(0, self.grid.x):
            for j in range(0, self.grid.y):
                self.tiles[(i, j)].position = self.tiles[(i, j)].delta * t
                self.place_tile(i, j)

    # private method
    def _get_delta(self, x, y):
        # Destination cell of tile (x, y) according to the shuffled order,
        # expressed as a displacement from its starting cell.
        idx = x * self.grid.y + y
        i, j = divmod(self.tiles_order[idx], self.grid.y)
        return Point2(i, j) - Point2(x, y)
class FadeOutTRTiles(TiledGrid3DAction):
    """Fades out each tile following a diagonal Top-Right path until all the tiles are faded out.

    Example::

        scene.do(FadeOutTRTiles(grid=(16,12), duration=10))
    """

    def update(self, t):
        # direction right - up
        for i in range(self.grid.x):
            for j in range(self.grid.y):
                distance = self.test_func(i, j, t)
                if distance == 0:
                    # Tile fully behind the sweep front: hide it.
                    self.turn_off_tile(i, j)
                elif distance < 1:
                    # Tile on the front: shrink it towards its center.
                    self.transform_tile(i, j, distance)
                else:
                    # Front has not reached this tile: restore it.
                    self.turn_on_tile(i, j)

    def turn_on_tile(self, x, y):
        # Restore the untouched vertex coordinates of tile (x, y).
        self.set_tile(x, y, self.get_original_tile(x, y))

    def transform_tile(self, x, y, t):
        # Shrink tile (x, y) towards its center by factor t in [0, 1].
        # coords is a flat list of 4 vertices x 3 components; indices 0/3/6/9
        # are x values, 1/4/7/10 are y values (vertex winding assumed from
        # the index pattern -- TODO confirm against the grid implementation).
        coords = self.get_original_tile(x, y)
        for c in range(len(coords)):
            # x: vertices 0 and 3 move +x, vertices 1 and 2 move -x.
            if c == 0 * 3 or c == 3 * 3:
                coords[c] = coords[c] + (self.target.grid.x_step / 2.0) * (1 - t)
            elif c == 1 * 3 or c == 2 * 3:
                coords[c] = coords[c] - (self.target.grid.x_step / 2.0) * (1 - t)

            # y: vertices 0 and 1 move +y, vertices 2 and 3 move -y.
            if c == 0 * 3 + 1 or c == 1 * 3 + 1:
                coords[c] = coords[c] + (self.target.grid.y_step / 2.0) * (1 - t)
            elif c == 2 * 3 + 1 or c == 3 * 3 + 1:
                coords[c] = coords[c] - (self.target.grid.y_step / 2.0) * (1 - t)
        self.set_tile(x, y, coords)

    def turn_off_tile(self, x, y):
        # Collapse the tile to a degenerate all-zero quad so it disappears.
        self.set_tile(x, y, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])

    def test_func(self, i, j, t):
        # Progress of the diagonal sweep located at x + y; the 6th power
        # sharpens the transition band around the front.
        x, y = self.grid * t
        if x + y == 0:
            return 1
        return pow((i + j) / (x + y), 6)
class FadeOutBLTiles(FadeOutTRTiles):
    """Fades out tiles along a diagonal sweep towards the bottom-left corner.

    Example::

        scene.do(FadeOutBLTiles(grid=(16,12), duration=5))
    """

    def test_func(self, i, j, t):
        # The sweep front shrinks towards the origin, so it is measured
        # against the remaining time (1 - t).
        front_x, front_y = self.grid * (1 - t)
        diagonal = i + j
        if diagonal == 0:
            return 1
        return ((front_x + front_y) / diagonal) ** 6
class FadeOutUpTiles(TiledGrid3DAction):
    """Fades out each tile following an upwards path until all the tiles are faded out.

    Example::

        scene.do(FadeOutUpTiles(grid=(16,12), duration=5))
    """

    def test_func(self, i, j, t):
        # Horizontal sweep front: only the row index j matters.
        x, y = self.grid * t
        if y == 0:
            return 1
        return pow(j / y, 6)

    def transform_tile(self, x, y, t):
        # Like FadeOutTRTiles.transform_tile, but only the y components are
        # squeezed, so tiles flatten vertically as the front passes them.
        coords = self.get_original_tile(x, y)
        for c in range(len(coords)):
            # y
            if c == 0 * 3 + 1 or c == 1 * 3 + 1:
                coords[c] = coords[c] + (self.target.grid.y_step / 2.0) * (1 - t)
            elif c == 2 * 3 + 1 or c == 3 * 3 + 1:
                coords[c] = coords[c] - (self.target.grid.y_step / 2.0) * (1 - t)
        self.set_tile(x, y, coords)
class FadeOutDownTiles(FadeOutUpTiles):
    """Fades out tiles row by row, sweeping from the top of the screen down.

    Example::

        scene.do(FadeOutDownTiles(grid=(16,12), duration=5))
    """

    def test_func(self, i, j, t):
        # Mirror of FadeOutUpTiles: the front shrinks with (1 - t) and is
        # compared against the row index j.
        _, front_row = self.grid * (1 - t)
        if j == 0:
            return 1
        return (front_row / j) ** 6
class TurnOffTiles(TiledGrid3DAction):
    """Turns each tile off (hides it) in a random order.

    Example::

        scene.do(TurnOffTiles(grid=(16,12), seed=1, duration=10))
    """

    def init(self, seed=-1, *args, **kw):
        super(TurnOffTiles, self).init(*args, **kw)
        self.seed = seed

    def start(self):
        super(TurnOffTiles, self).start()
        # A seed of -1 means "do not seed": keep the shuffle random.
        if self.seed != -1:
            random.seed(self.seed)
        self.nr_of_tiles = self.grid.x * self.grid.y
        self.tiles_order = list(range(self.nr_of_tiles))
        random.shuffle(self.tiles_order)

    def update(self, t):
        # The first int(t * total) tiles of the shuffled order are hidden,
        # the remainder restored, so progress tracks elapsed time.
        turned_off = int(t * self.nr_of_tiles)
        for position, tile_idx in enumerate(self.tiles_order):
            if position < turned_off:
                self.turn_off_tile(tile_idx)
            else:
                self.turn_on_tile(tile_idx)

    def get_tile_pos(self, idx):
        # Convert a flat tile index into (column, row) coordinates.
        return divmod(idx, self.grid.y)

    def turn_on_tile(self, t):
        x, y = self.get_tile_pos(t)
        self.set_tile(x, y, self.get_original_tile(x, y))

    def turn_off_tile(self, t):
        # A degenerate all-zero quad renders as nothing.
        x, y = self.get_tile_pos(t)
        self.set_tile(x, y, [0] * 12)
class WavesTiles3D(TiledGrid3DAction):
    """Simulates waves by displacing each tile along the z-axis with math.sin().

    Example::

        scene.do(WavesTiles3D(waves=5, amplitude=120, grid=(16,16), duration=10))
    """

    def init(self, waves=4, amplitude=120, *args, **kw):
        """
        :Parameters:
            `waves` : int
                Number of waves (2 * pi) that the action will perform. Default is 4
            `amplitude` : int
                Wave amplitude (height). Default is 20
        """
        super(WavesTiles3D, self).init(*args, **kw)

        #: Total number of waves to perform
        self.waves = waves

        #: amplitude rate. Default: 1.0
        #: This value is modified by other actions like `AccelAmplitude`.
        self.amplitude_rate = 1.0
        self.amplitude = amplitude

    def update(self, t):
        # Phase common to the whole grid at time t; hoisted out of the loops.
        phase_base = t * math.pi * self.waves * 2
        for i in range(self.grid.x):
            for j in range(self.grid.y):
                coords = self.get_original_tile(i, j)
                # The tile's first vertex (x, y) shifts the phase slightly so
                # neighbouring tiles ride offset waves.
                z = math.sin(phase_base + (coords[1] + coords[0]) * .01) * self.amplitude * self.amplitude_rate
                # Add the lift to every z component (indices 2, 5, 8, ...).
                for k in range(2, len(coords), 3):
                    coords[k] += z
                self.set_tile(i, j, coords)
class JumpTiles3D(TiledGrid3DAction):
    """Makes tiles jump along the z-axis, the two checkerboard halves of the
    grid moving in antiphase (sine vs. sine shifted by pi).

    Example::

        scene.do(JumpTiles3D(jumps=5, amplitude=40, grid=(16,16), duration=10))
    """

    def init(self, jumps=4, amplitude=20, *args, **kw):
        """
        :Parameters:
            `jumps` : int
                Number of jumps (2 * pi) that the action will perform. Default is 4
            `amplitude` : int
                Wave amplitude (height). Default is 20
        """
        super(JumpTiles3D, self).init(*args, **kw)

        #: Total number of jumps to perform
        self.jumps = jumps

        #: amplitude rate. Default: 1.0
        #: This value is modified by other actions like `AccelAmplitude`.
        self.amplitude_rate = 1.0
        self.amplitude = amplitude

    def update(self, t):
        phase = t * math.pi * self.jumps * 2
        amplitude = self.amplitude * self.amplitude_rate
        # Precomputed lifts for the two tile parities: in exact antiphase.
        lifts = (math.sin(phase) * amplitude, math.sin(math.pi + phase) * amplitude)
        for i in range(self.grid.x):
            for j in range(self.grid.y):
                lift = lifts[(i + j) % 2]
                coords = self.get_original_tile(i, j)
                # Apply the lift to every z component (indices 2, 5, 8, ...).
                for k in range(2, len(coords), 3):
                    coords[k] += lift
                self.set_tile(i, j, coords)
class SplitRows(TiledGrid3DAction):
    """Splits the screen in a number of rows and slides each row off-screen.

    Rows with an even index move left, rows with an odd index move right.

    Example::

        scene.do(SplitRows(rows=3, duration=2))
    """

    def init(self, rows=9, grid=(-1, -1), *args, **kw):
        """
        :Parameters:
            `rows` : int
                Number of rows that will have the effect. Default: 9

        :raises ValueError: if a custom ``grid`` is passed; the grid is
            derived from ``rows`` and cannot be overridden.
        """
        # ValueError (not a bare Exception) for an invalid argument; still
        # caught by any caller handling Exception.
        if grid != (-1, -1):
            raise ValueError("SplitRows does not accept a grid argument; "
                             "the grid is derived from 'rows'")
        grid = (1, rows)
        self.rows = rows
        super(SplitRows, self).init(grid, *args, **kw)

    def update(self, t):
        width, _ = director.get_window_size()
        for j in range(self.grid.y):
            # Direction depends only on the row parity, so it is hoisted out
            # of the per-vertex loop (even rows slide left, odd rows right).
            direction = -1 if j % 2 == 0 else 1
            coords = self.get_original_tile(0, j)
            # Shift every x component (indices 0, 3, 6, ...) by up to a full
            # window width as t goes from 0 to 1.
            for c in range(0, len(coords), 3):
                coords[c] += direction * width * t
            self.set_tile(0, j, coords)
class SplitCols(TiledGrid3DAction):
    """Splits the screen in a number of columns and slides each column off-screen.

    Columns with an even index move down, columns with an odd index move up.

    Example::

        scene.do(SplitCols(cols=3, duration=2))
    """

    def init(self, cols=9, grid=(-1, -1), *args, **kw):
        """
        :Parameters:
            `cols` : int
                Number of columns that will have the effect. Default: 9

        :raises ValueError: if a custom ``grid`` is passed; the grid is
            derived from ``cols`` and cannot be overridden.
        """
        # ValueError (not a bare Exception) for an invalid argument; still
        # caught by any caller handling Exception.
        if grid != (-1, -1):
            raise ValueError("SplitCols does not accept a grid argument; "
                             "the grid is derived from 'cols'")
        grid = (cols, 1)
        self.cols = cols
        super(SplitCols, self).init(grid, *args, **kw)

    def update(self, t):
        _, height = director.get_window_size()
        for i in range(self.grid.x):
            # Direction depends only on the column parity, so it is hoisted
            # out of the per-vertex loop (even columns down, odd columns up).
            direction = -1 if i % 2 == 0 else 1
            coords = self.get_original_tile(i, 0)
            # Shift every y component (indices 1, 4, 7, ...) by up to a full
            # window height as t goes from 0 to 1.
            for c in range(0, len(coords), 3):
                coords[c + 1] += direction * height * t
            self.set_tile(i, 0, coords)
| StarcoderdataPython |
5141841 | <reponame>jbellogo/Hours-Log
# Easy to use Google Sheets API
import gspread
# Required credentials for acessing private data as is Google Sheets
from oauth2client.service_account import ServiceAccountCredentials
# Full, permissive scope to access all of a user's files
SCOPE = ['https://www.googleapis.com/auth/drive']
# NOTE(review): requires a service-account key file 'creds.json' beside the
# script; this runs at import time, so the script aborts without it.
CREDENTIALS = ServiceAccountCredentials.from_json_keyfile_name('creds.json', SCOPE)
CLIENT = gspread.authorize(CREDENTIALS)
# First worksheet of the spreadsheet holding the hours log.
SHEET = CLIENT.open('SPREAD_SHEET_NAME').sheet1

# Dictionary to pandas data frame
import pandas as pd

# Whole sheet pulled once into a DataFrame; presumably one row per subject
# plus a totals row -- see the row-13 slicing below.
df = pd.DataFrame(SHEET.get_all_records())
### Helper Methods ###
def front(self, n):
    """Return the first *n* columns -- like head(), but column-wise."""
    leading = self.iloc[:, :n]
    return leading
def back(self, n):
    """Return the last *n* columns -- like tail(), but column-wise."""
    trailing = self.iloc[:, -n:]
    return trailing
# Attach the helpers as DataFrame methods so they can be called fluently
# (e.g. sorted_df.front(1) in print_week_info below).
pd.DataFrame.front = front
pd.DataFrame.back = back

### WEEK STATS ###
# Row labelled 13 holds the per-day totals -- assumed from the sheet
# layout; confirm if the spreadsheet changes.
hours_per_day_data = df.loc[[13], :]
def print_week_info(hours_per_day_df):
    """Build a human-readable summary of the week's daily hours.

    :param hours_per_day_df: single-row DataFrame (sheet row 13) with the
        per-day totals in columns 'M'..'Su' and the week total in 'TW'.
    :returns: multi-line string: week total, busiest day, lightest day,
        and the mean hours per day.

    NOTE(review): the row label 13 is hard-coded below, so this only works
    for that exact sheet layout ("Can't be generalized for any row yet").
    """
    message = "WEEK INFORMATION:\n\n"
    total = hours_per_day_df['TW'].values[0]
    message += f"Total hours this week: {total}\n"

    # Sort the Monday..Sunday columns by value (descending) so the busiest
    # day is the first column and the lightest day the last.
    sorted_df = hours_per_day_df.loc[[13], "M":'Su'].sort_values(by=13, ascending=False, axis=1)

    max_frame = sorted_df.front(1)
    max_hours = max_frame.values[0][0]
    max_day = max_frame.columns[0]
    message += f"Week high of {max_hours}h on {max_day}\n"

    min_frame = sorted_df.back(1)
    min_hours = min_frame.values[0][0]
    min_day = min_frame.columns[0]
    message += f"Week low of {min_hours}h on {min_day}\n"

    average = hours_per_day_df.loc[[13], "M":'Su'].mean(axis=1)
    average = "{:.3f}".format(average.values[0])
    message += f"Average of hours working per day: {average}h\n"
    return message
### COURSE INFO ###
# Rows 0-12 are the individual subjects; keep just the subject name and the
# week-total column. Row layout assumed from the sheet -- TODO confirm.
hours_per_course_data = df.loc[0:12, ['Subject', 'TW']]
def print_course_info(hours_per_course_df):
    """Summarise the hours spent per course this week.

    :param hours_per_course_df: DataFrame with 'Subject' and 'TW' (total
        weekly hours) columns, one row per course.
    :returns: summary string listing the 3 most and 3 least worked courses
        and the mean over the school courses.
    """
    sorted_frame = hours_per_course_df.sort_values(by='TW', ascending=False)
    list_of_priorities = sorted_frame.head(3)['Subject']
    list_of_neglected = sorted_frame.tail(3)['Subject']
    ordered_times = sorted_frame['TW']
    sort_indices = list(sorted_frame.index)  # list with the indices at the position of their sorted values
    message = "COURSE INFORMATION\n\nPrioritized:\n"
    count = 1
    for course in list_of_priorities:
        # sort_indices[count-1] maps rank back to the original row label so
        # the hour count can be looked up in ordered_times.
        message += f"Your #{count} prioritized course this week was: " + course + f" by: {ordered_times[sort_indices[count-1]]}h\n"
        count += 1
    count = 1
    message += "\nNeglected:\n"
    for course in list_of_neglected:
        # Walk the sorted index list from the end for the lowest counts.
        message += f"Your #{count} neglected course this week was: " + course + f" by: {ordered_times[sort_indices[-count]]}h\n"
        count += 1
    # Rows 6-12 are taken to be the school courses (rows 0-5 presumably
    # other activities) -- TODO confirm against the sheet layout.
    school_course_average = hours_per_course_df.loc[6:12, ['TW']].mean()
    school_course_average = "{:.3f}".format(school_course_average[0])
    message += f"\nMean:\nThe average time spent per school course this week was: {school_course_average}h"
    return message
def main():
    """Assemble the full report: week summary followed by course summary."""
    week_section = print_week_info(hours_per_day_data)
    course_section = print_course_info(hours_per_course_data)
    return '\n\n' + week_section + '\n\n' + course_section
# Sending Email:
import smtplib, ssl, getpass #for invisible input
port = 465  # For SSL
smtp_server = "smtp.gmail.com"
sender_email = "<EMAIL>"  # Enter your address. need to enable "Less Secure Apps" on gmail
receiver_email = "<EMAIL>"  # Enter receiver address
# Prompt without echoing. The original line (`password = <PASSWORD>(...)`)
# was an anonymization placeholder and not valid Python; getpass.getpass
# restores the intended invisible-input behaviour (getpass is imported above).
password = getpass.getpass("Type your password and press enter: ")

message = 'Week Summary: \n' + main()
print(message)

context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
    try:
        server.login(sender_email, password)
        server.sendmail(sender_email, receiver_email, message)
        print('message sent successfully')
    except smtplib.SMTPException as error:
        # Report the actual SMTP failure instead of swallowing it silently
        # with a bare except.
        print('An error occured:', error)
| StarcoderdataPython |
251403 | <reponame>mehrdad-shokri/retdec-regression-tests-framework
"""
A representation of a decompilation that has run.
"""
import os
import re
from regression_tests.parsers.fileinfo_output_parser import FileinfoOutput
from regression_tests.tools.tool import Tool
from regression_tests.utils import memoize
from regression_tests.utils import overrides
class Decompiler(Tool):
    """A representation of a decompilation that has run."""

    @property
    def input_file(self):
        """The input file (:class:`.File`)."""
        return self.args.input_files[0]

    @property
    def out_hll_file(self):
        """Output file in the high-level language (C, Python)."""
        return self._get_file(self.args.output_file.name)

    @property
    @memoize
    def out_hll(self):
        """Contents of the output file in the high-level language (C, Python).
        """
        return self.out_hll_file.text

    def out_hll_is_c(self):
        """Checks if the output file in the high-level language is a C file."""
        # A missing --hll argument means the default output language, C.
        return self.args.hll is None or self.args.hll == 'c'

    @property
    def out_c_file(self):
        """Output file in the C language.

        An alias for the :func:`out_hll_file` property.

        :raises AssertionError: If the HLL is not C.
        """
        self._verify_output_hll_is_c()
        return self.out_hll_file

    @property
    def out_base_file_name(self):
        """Base output file name used to construct other output file names.
        """
        # Strip the extension; '.dsm', '.ll', etc. are appended to this.
        return os.path.splitext(self.args.output_file.name)[0]

    @property
    @memoize
    def out_c(self):
        """Contents of the output file in the C language.

        An alias for the :func:`out_hll` property.

        :raises AssertionError: If the HLL is not C.
        """
        self._verify_output_hll_is_c()
        return self.out_hll

    @property
    def out_dsm_file(self):
        """Output DSM file."""
        return self._get_file(self.out_base_file_name + '.dsm')

    @property
    @memoize
    def out_dsm(self):
        """Contents of the output DSM file.
        """
        return self.out_dsm_file.text

    @property
    @memoize
    def out_ll_file(self):
        """Output LLVM IR file."""
        return self._get_file(self.out_base_file_name + '.ll')

    @property
    @memoize
    def out_ll(self):
        """Contents of the output LLVM IR file.
        """
        return self.out_ll_file.text

    @property
    def out_config_file(self):
        """Output configuration file."""
        return self._get_file(self.out_base_file_name + '.config.json')

    @property
    @memoize
    def out_config(self):
        """Contents of the output configuration file.
        """
        return self.out_config_file.text

    @property
    @overrides(Tool)
    def log_file_name(self):
        # The decompiler log sits next to the main output file.
        return self.args.output_file.name + '.log'

    @property
    @memoize
    def fileinfo_output(self):
        """Output from fileinfo (:class:`FileinfoOutput`).

        When there are multiple outputs (i.e. fileinfo run several times), it
        returns the first one. If this is the case, you can use
        func:`fileinfo_outputs()` instead, which always returns a list.

        :raises AssertionError: If fileinfo did not run or did not produce any
            output.
        """
        if not self.fileinfo_outputs:
            raise AssertionError('fileinfo did not run or did not produce any output')
        return self.fileinfo_outputs[0]

    @property
    @memoize
    def fileinfo_outputs(self):
        """Outputs from fileinfo (list of :class:`FileinfoOutput`).

        This function can be used when fileinfo run several times. If it run
        only once, a singleton list is returned.
        """
        # Each fileinfo run in the log begins with a '##### Gathering file
        # information' banner followed by a 'RUN:' line; capture everything
        # up to the next section banner or a shell abort message.
        outputs = re.findall(r"""
            \#\#\#\#\#\ Gathering\ file\ information[^\n]+\n
            RUN: [^\n]+\n
            (.*?)
            \n
            (?:
                \#\#\#\#\#\ # Ended correctly.
                |
                \./retdec-decompiler:\ line\ \d+: # Failed (segfault etc.).
            )
        """, self.log, re.VERBOSE | re.MULTILINE | re.DOTALL)
        return [FileinfoOutput(output.strip()) for output in outputs]

    def _verify_output_hll_is_c(self):
        """Verifies that the output HLL is C."""
        if not self.out_hll_is_c():
            raise AssertionError('the output high-level language is not C')
| StarcoderdataPython |
5129668 | """
Author: <NAME>
Institute: Stony Brook University
"""
import torch.nn as nn
from Layers.MRN_pytorch import *
from Layers.DMTRL_pytorch import *
from Layers.TAL_pytorch import *
class SingleTaskModel(nn.Module):
    """One fully independent MLP per task (no parameter sharing).

    Each task gets its own 3-hidden-layer ReLU network mapping the
    1152-dim input feature to that task's class logits.
    """

    def __init__(self, hidden_feature, task_classes):
        super(SingleTaskModel, self).__init__()
        self.nets = []
        self.num_classes = task_classes
        for task_idx, n_out in enumerate(task_classes):
            branch = nn.Sequential(
                nn.Linear(1152, hidden_feature),
                nn.ReLU(),
                nn.Linear(hidden_feature, hidden_feature),
                nn.ReLU(),
                nn.Linear(hidden_feature, hidden_feature),
                nn.ReLU(),
                nn.Linear(hidden_feature, n_out),
            )
            self.nets.append(branch)
            # Register under a unique attribute so torch tracks parameters.
            setattr(self, 'net_%d' % task_idx, branch)
        self.loss = nn.BCEWithLogitsLoss()

    def forward(self, feature, taskID):
        """Logits of task ``taskID`` for ``feature``."""
        return self.nets[taskID](feature)

    def return_loss(self, x, y, taskID):
        """BCE-with-logits loss on task ``taskID``, scaled by its class count."""
        logits = self.forward(x, taskID)
        return self.loss(logits, y) * self.num_classes[taskID]
class HardSharedModel(nn.Module):
    """Hard parameter sharing: one shared trunk plus a linear head per task."""

    def __init__(self, hidden_feature, task_classes):
        super(HardSharedModel, self).__init__()
        # Trunk shared by every task.
        self.backend = nn.Sequential(
            nn.Linear(1152, hidden_feature),
            nn.ReLU(),
            nn.Linear(hidden_feature, hidden_feature),
            nn.ReLU(),
            nn.Linear(hidden_feature, hidden_feature),
            nn.ReLU(),
        )
        self.num_classes = task_classes
        self.nets = []
        for task_idx, n_out in enumerate(task_classes):
            head = nn.Sequential(nn.Linear(hidden_feature, n_out))
            self.nets.append(head)
            # Register under a unique attribute so torch tracks parameters.
            setattr(self, 'net_%d' % task_idx, head)
        self.loss = nn.BCEWithLogitsLoss()

    def forward(self, feature, taskID):
        """Logits of task ``taskID``: shared trunk, then the task's head."""
        shared = self.backend(feature)
        return self.nets[taskID](shared)

    def return_loss(self, x, y, taskID):
        """BCE-with-logits loss on task ``taskID``, scaled by its class count."""
        return self.loss(self.forward(x, taskID), y) * self.num_classes[taskID]
class MultiRelationNet(nn.Module):
    """Multilinear Relationship Network: three shared MRN layers with
    per-task linear heads and a tunable regularization term per layer."""

    def __init__(self, hidden_feature, task_classes, c=1e-1, regularization_task=True,
                 regularization_feature=True, regularization_input=True, update_interval=50):
        super(MultiRelationNet, self).__init__()
        # All three layers share the same configuration flags.
        shared_kwargs = dict(dropout=False, bn=False,
                             regularization_task=regularization_task,
                             regularization_feature=regularization_feature,
                             regularization_input=regularization_input,
                             update_interval=update_interval)
        self.layer1 = MRN_Linear(1152, hidden_feature, len(task_classes), **shared_kwargs)
        self.layer2 = MRN_Linear(hidden_feature, hidden_feature, len(task_classes), **shared_kwargs)
        self.layer3 = MRN_Linear(hidden_feature, hidden_feature, len(task_classes), **shared_kwargs)
        self.num_classes = task_classes
        self.nets = []
        for task_idx, n_out in enumerate(task_classes):
            head = nn.Sequential(nn.Linear(hidden_feature, n_out))
            self.nets.append(head)
            setattr(self, 'net_%d' % task_idx, head)
        self.loss = nn.BCEWithLogitsLoss()
        # Regularization strength applied to every MRN layer.
        self.c = c

    def forward(self, feature, taskID):
        """Logits of task ``taskID`` after the three task-aware layers."""
        hidden = feature
        for layer in (self.layer1, self.layer2, self.layer3):
            hidden = layer(hidden, taskID)
        return self.nets[taskID](hidden)

    def return_loss(self, x, y, taskID):
        """Scaled BCE loss plus the MRN regularization of all three layers."""
        loss = self.loss(self.forward(x, taskID), y) * self.num_classes[taskID]
        for layer in (self.layer1, self.layer2, self.layer3):
            loss = loss + layer.regularization(self.c)
        return loss
class SoftOrderNet(nn.Module):
    """Soft layer ordering: two shared layers combined per task and depth
    through a learned mixing tensor ``S`` (softmax-normalized at forward
    time), so each task learns its own soft ordering of the shared layers."""

    def __init__(self, hidden_feature, task_classes):
        super(SoftOrderNet, self).__init__()
        self.backend = nn.Sequential(
            nn.Linear(1152, hidden_feature),
            nn.ReLU(),
        )
        # Two shareable layers whose per-task ordering is learned.
        self.softlayers = []
        for depth in range(2):
            shared = nn.Sequential(
                nn.Linear(hidden_feature, hidden_feature),
                nn.ReLU(),
            )
            self.softlayers.append(shared)
            setattr(self, 'soft_layer_%d' % depth, shared)
        # Mixing tensor: [max tasks, depth, layer choice].
        self.S = nn.Parameter(0.01 * torch.randn(size=(16, 2, 2)))
        self.nets = []
        self.num_classes = task_classes
        for task_idx, n_out in enumerate(task_classes):
            head = nn.Sequential(nn.Linear(hidden_feature, n_out))
            self.nets.append(head)
            setattr(self, 'net_%d' % task_idx, head)
        self.loss = nn.BCEWithLogitsLoss()

    def forward(self, feature, taskID):
        """Logits of task ``taskID`` using its learned layer mixing."""
        mix = torch.softmax(self.S, dim=-1)
        hidden = self.backend(feature)
        for depth in range(2):
            branch_a = self.softlayers[0](hidden)
            branch_b = self.softlayers[1](hidden)
            hidden = mix[taskID, depth, 0] * branch_a + mix[taskID, depth, 1] * branch_b
        return self.nets[taskID](hidden)

    def return_loss(self, x, y, taskID):
        """BCE-with-logits loss on task ``taskID``, scaled by its class count."""
        return self.loss(self.forward(x, taskID), y) * self.num_classes[taskID]
class DMTRL(nn.Module):
    """Deep Multi-Task Representation Learning: three tensor-factorized
    shared layers (factorization scheme selected by ``method``, default
    'Tucker') with one linear head per task."""

    def __init__(self, hidden_feature, task_classes, method='Tucker'):
        super(DMTRL, self).__init__()
        self.layer1 = DMTRL_Linear(1152, hidden_feature, len(task_classes), method)
        self.layer2 = DMTRL_Linear(hidden_feature, hidden_feature, len(task_classes), method)
        self.layer3 = DMTRL_Linear(hidden_feature, hidden_feature, len(task_classes), method)
        self.nets = []
        self.num_classes = task_classes
        for task_idx, n_out in enumerate(task_classes):
            head = nn.Sequential(nn.Linear(hidden_feature, n_out))
            self.nets.append(head)
            setattr(self, 'net_%d' % task_idx, head)
        self.loss = nn.BCEWithLogitsLoss()

    def forward(self, feature, taskID):
        """Logits of task ``taskID`` after the three factorized layers."""
        hidden = feature
        for layer in (self.layer1, self.layer2, self.layer3):
            hidden = layer(hidden, taskID)
        return self.nets[taskID](hidden)

    def return_loss(self, x, y, taskID):
        """BCE-with-logits loss on task ``taskID``, scaled by its class count."""
        return self.loss(self.forward(x, taskID), y) * self.num_classes[taskID]
class TAAN(nn.Module):
    """Task-Adaptive Activation Network: three TAL layers sharing a basis
    of size ``basis``, per-task linear heads, and a per-layer
    regularization term weighted by ``c``."""

    def __init__(self, hidden_feature, task_classes, basis=16, c=0.1, regularization=None):
        super(TAAN, self).__init__()
        n_tasks = len(task_classes)
        self.layer1 = TAL_Linear(1152, hidden_feature, basis=basis, tasks=n_tasks,
                                 regularize=regularization)
        self.layer2 = TAL_Linear(hidden_feature, hidden_feature, basis=basis, tasks=n_tasks,
                                 regularize=regularization)
        self.layer3 = TAL_Linear(hidden_feature, hidden_feature, basis=basis, tasks=n_tasks,
                                 regularize=regularization)
        self.nets = []
        self.num_classes = task_classes
        for task_idx, n_out in enumerate(task_classes):
            head = nn.Sequential(nn.Linear(hidden_feature, n_out))
            self.nets.append(head)
            setattr(self, 'net_%d' % task_idx, head)
        self.loss = nn.BCEWithLogitsLoss()
        # Weight of the per-layer regularization terms.
        self.c = c
        self.regularization = regularization

    def forward(self, feature, taskID):
        """Logits of task ``taskID`` after the three task-adaptive layers."""
        hidden = feature
        for layer in (self.layer1, self.layer2, self.layer3):
            hidden = layer(hidden, taskID)
        return self.nets[taskID](hidden)

    def return_loss(self, x, y, taskID):
        """Scaled BCE loss plus the TAL regularization of all three layers."""
        loss = self.loss(self.forward(x, taskID), y) * self.num_classes[taskID]
        for layer in (self.layer1, self.layer2, self.layer3):
            loss = loss + layer.regularization(self.c)
        return loss
class CrossStitch(nn.Module):
    """Cross-stitch network: one linear column per task whose activations
    are linearly recombined across tasks after every layer by learned
    alpha mixing matrices."""

    def __init__(self, hidden_feature, task_classes):
        super(CrossStitch, self).__init__()
        self.num_tasks = len(task_classes)
        # One three-layer column per task.
        self.linear1 = [nn.Linear(1152, hidden_feature) for _ in range(self.num_tasks)]
        self.linear2 = [nn.Linear(hidden_feature, hidden_feature) for _ in range(self.num_tasks)]
        self.linear3 = [nn.Linear(hidden_feature, hidden_feature) for _ in range(self.num_tasks)]
        # setattr registers each plain-list layer so torch tracks its
        # parameters.
        for l, layer in enumerate(self.linear1):
            setattr(self, 'layer1_%d' % l, layer)
        for l, layer in enumerate(self.linear2):
            setattr(self, 'layer2_%d' % l, layer)
        for l, layer in enumerate(self.linear3):
            setattr(self, 'layer3_%d' % l, layer)

        # Per-depth (tasks x tasks) mixing matrices.
        self.alpha1 = nn.Parameter(0.01 * torch.randn(size=(self.num_tasks, self.num_tasks)))
        self.alpha2 = nn.Parameter(0.01 * torch.randn(size=(self.num_tasks, self.num_tasks)))
        self.alpha3 = nn.Parameter(0.01 * torch.randn(size=(self.num_tasks, self.num_tasks)))

        # One linear head per task.
        self.nets = []
        self.num_classes = task_classes
        for d, num_classes in enumerate(task_classes):
            net = nn.Sequential(
                nn.Linear(hidden_feature, num_classes),
            )
            self.nets.append(net)
            setattr(self, 'net_%d' % d, net)

        self.loss = nn.BCEWithLogitsLoss()
        return

    def forward(self, feature, taskID):
        # normalize alpha.
        # NOTE(review): this rescales the alpha parameters *in-place* on
        # every forward call (rows become unit-norm, so after the first call
        # the division is a no-op until the optimizer changes alpha). The
        # mutation goes through .data, bypassing autograd -- confirm this
        # is intended.
        expALphaNorm = torch.norm(self.alpha1, dim=-1, keepdim=True).detach()
        self.alpha1.data /= expALphaNorm.data
        expALphaNorm = torch.norm(self.alpha2, dim=-1, keepdim=True).detach()
        self.alpha2.data /= expALphaNorm.data
        expALphaNorm = torch.norm(self.alpha3, dim=-1, keepdim=True).detach()
        self.alpha3.data /= expALphaNorm.data
        # forwarding.
        # tensordot mixes the per-task activations: output column i is the
        # alpha[i]-weighted sum over all task columns.
        feature1 = torch.tensordot(self.alpha1, torch.cat([F.relu(linear(feature)).unsqueeze(0) for linear in self.linear1], 0),
                                   dims=([-1], [0]))
        feature2 = torch.tensordot(self.alpha2,
                                   torch.cat([F.relu(linear(feature1[i])).unsqueeze(0) for i, linear in enumerate(self.linear2)], 0),
                                   dims=([-1], [0]))
        feature3 = torch.tensordot(self.alpha3,
                                   torch.cat([F.relu(linear(feature2[i])).unsqueeze(0) for i, linear in enumerate(self.linear3)], 0),
                                   dims=([-1], [0]))
        return self.nets[taskID](feature3[taskID])

    def return_loss(self, x, y, taskID):
        y_pred = self.forward(x, taskID)
        return self.loss(y_pred, y) * self.num_classes[taskID]
class MMoE(nn.Module):
    """Multi-gate Mixture-of-Experts: ``Expert`` shared expert networks
    combined per task through a learned softmax gate over the input."""

    def __init__(self, hidden_feature, task_classes, Expert=6):
        super(MMoE, self).__init__()
        self.num_tasks = len(task_classes)
        self.Experts = []
        for i in range(Expert):
            # NOTE(review): the experts are moved to CUDA here while the
            # rest of the model stays wherever the caller puts it; this
            # hard-codes a GPU requirement at construction time.
            nets = nn.Sequential(
                nn.Linear(1152, hidden_feature),
                nn.ReLU(),
                nn.Linear(hidden_feature, hidden_feature),
                nn.ReLU(),
                nn.Linear(hidden_feature, hidden_feature),
                nn.ReLU(),
            ).cuda()
            self.Experts.append(nets)
            # Register under a unique attribute so torch tracks parameters.
            setattr(self, 'Expert_%d' % i, nets)

        # Per-task gating weights: shape [tasks, experts, input dim].
        self.gates = nn.Parameter(0.01 * torch.randn(size=(self.num_tasks, Expert, 1152)))

        self.nets = []
        self.num_classes = task_classes
        for d, num_classes in enumerate(task_classes):
            net = nn.Sequential(
                nn.Linear(hidden_feature, num_classes),
            )
            self.nets.append(net)
            setattr(self, 'net_%d' % d, net)

        self.loss = nn.BCEWithLogitsLoss()

    def forward(self, feature, taskID):
        """Gated mixture of the expert outputs for task ``taskID``."""
        # shape = [experts, batches, dims]
        hidden_features = torch.cat([subnetwork(feature).unsqueeze(0) for subnetwork in self.Experts], 0)
        # Gate logits are (batch, experts); normalize over the experts axis.
        # The original relied on F.softmax's deprecated implicit dim, which
        # resolves to dim=1 for a 2-D input -- made explicit here.
        # shape = [experts, batches, 1]
        gate = F.softmax(F.linear(feature, self.gates[taskID]), dim=1).transpose(1, 0).unsqueeze(-1)
        # shape = [batches, dims]
        hidden_features = torch.sum(gate * hidden_features, dim=0)
        return self.nets[taskID](hidden_features)

    def return_loss(self, x, y, taskID):
        """BCE-with-logits loss on task ``taskID``, scaled by its class count."""
        y_pred = self.forward(x, taskID)
        return self.loss(y_pred, y) * self.num_classes[taskID]
return self.loss(y_pred, y) * self.num_classes[taskID] | StarcoderdataPython |
11351142 | import os
import json
from oauth2client.service_account import ServiceAccountCredentials
def get_github_credentials():
    """Read the GitHub secret and OAuth token from the environment.

    Returns a ``(secret, oauth_token)`` tuple; either element is ``None``
    when the corresponding environment variable is unset.
    """
    env = os.environ
    return env.get("GH_SECRET"), env.get("GH_AUTH")
def get_telegram_credentials():
    """Read the Telegram bot token and target chat id from the environment.

    Returns a ``(bot_token, chat_id)`` tuple; elements are ``None`` when
    the corresponding environment variable is unset.
    """
    env = os.environ
    return env.get("BOT_TOKEN"), env.get("BOT_CHAT_ID")
def get_google_credentials():
    """Build Google service-account credentials from the environment.

    Expects ``GOOGLE_CREDENTIALS`` to hold the full JSON key-file content.
    Raises if the variable is unset or is not valid JSON.
    """
    scopes = ['https://spreadsheets.google.com/feeds',
              'https://www.googleapis.com/auth/drive']
    raw_info = os.environ.get("GOOGLE_CREDENTIALS")
    keyfile_dict = json.loads(raw_info)
    return ServiceAccountCredentials.from_json_keyfile_dict(keyfile_dict, scopes)
| StarcoderdataPython |
3203434 | import json
from os.path import join, dirname, abspath, isfile
from core.devices.pixel import Pixel
"""
This module contains point clouds that can be used to construct PointCloud devices
point cloud files should be json files in the same format as gl_server, eg
[
{"point": [1,0,0]},
{"point": [0,1,0]}
]
"""
# Module path
path = join(dirname(abspath(__file__)), "")
def load_point_cloud(filename):
    """Load a gl_server-style point cloud JSON file into Pixel objects.

    *filename* is resolved relative to this module's directory.  Raises
    if the file does not exist.
    """
    filepath = path + filename
    # Fail with a clear message instead of an opaque IOError.
    if not isfile(filepath):
        raise Exception("Cannot find point cloud file "+filepath)
    with open(filepath) as handle:
        entries = json.load(handle)
    # Each entry is {"point": [x, y, z]}.
    return [Pixel(entry["point"]) for entry in entries]
| StarcoderdataPython |
4931612 | import numpy.core.numeric as _nx
def array_split(ary, indices_or_sections, axis=0):
    """
    Split an array into multiple sub-arrays.

    Please refer to the ``split`` documentation. The only difference
    between these functions is that ``array_split`` allows
    `indices_or_sections` to be an integer that does *not* equally
    divide the axis.

    See Also
    --------
    split : Split array into multiple sub-arrays of equal size.

    Examples
    --------
    x = np.arange(8.0)
    np.array_split(x, 3)
    [array([ 0.,  1.,  2.]), array([ 3.,  4.,  5.]), array([ 6.,  7.])]
    """
    # Length along the split axis; fall back to len() for plain sequences.
    try:
        Ntotal = ary.shape[axis]
    except AttributeError:
        Ntotal = len(ary)
    try:
        # indices_or_sections is a sequence of explicit split points.
        # (A stray Python-2 debug `print` was removed from this branch.)
        Nsections = len(indices_or_sections) + 1
        div_points = [0] + list(indices_or_sections) + [Ntotal]
    except TypeError:
        # indices_or_sections is a scalar: split into that many sections,
        # spreading any remainder over the leading sections.
        Nsections = int(indices_or_sections)
        if Nsections <= 0:
            raise ValueError('number sections must be larger than 0.')
        Neach_section, extras = divmod(Ntotal, Nsections)
        section_sizes = ([0] +
                         extras * [Neach_section+1] +
                         (Nsections-extras) * [Neach_section])
        div_points = _nx.array(section_sizes).cumsum()
    sub_arys = []
    sary = _nx.swapaxes(ary, axis, 0)
    for i in range(Nsections):
        st = div_points[i]
        end = div_points[i + 1]
        sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))
    # Historical kludge: empty sub-arrays shaped e.g. (0, 10) are replaced
    # by arrays of shape (0,).  The original called an undefined helper
    # (`_replace_zero_by_x_arrays`) unconditionally, which always raised
    # NameError; the replacement is now inlined and only runs when needed.
    if sub_arys[-1].size == 0 and sub_arys[-1].ndim != 1:
        import warnings
        warnings.warn("in the future np.array_split will retain the shape of "
                      "arrays with a zero size, instead of replacing them by "
                      "`array([])`, which always has a shape of (0,).",
                      FutureWarning)
        sub_arys = [_nx.array([]) if s.size == 0 and s.ndim != 1 else s
                    for s in sub_arys]
    return sub_arys
# Demo: split 0..99 at the given indices.  Fixed the Python-2 print
# statement (a SyntaxError under Python 3) to a print() call.
print(array_split(range(100), [33, 66, 99]))
| StarcoderdataPython |
3462642 | from django.db import models
from django.db.models import F
from django.utils import timezone
class Domain(models.Model):
    """
    Model to represent hostnames and domains
    """
    # Hostname, e.g. "example.com"; indexed since redirects/404s look it up.
    name = models.CharField(max_length=300, db_index=True)
    wildcard = models.BooleanField(
        default=False,
        help_text='Include all subdomains e.g. *.example.com',
    )
    class Meta:
        verbose_name = 'Domain'
        verbose_name_plural = 'Domains'
        ordering = ('name', )
    def __unicode__(self):
        # Python-2 display name (this codebase uses __unicode__ throughout).
        return self.name
class Redirect(models.Model):
    """
    Model to define old_domain/old_path to new_domain/new_paths
    """
    # Source of the redirect: domain plus path (stored separately).
    old_domain = models.ForeignKey(Domain, related_name='old_urls')
    old = models.CharField('Old Path', max_length=1000, db_index=True)
    # Destination the visitor is sent to.
    new_domain = models.ForeignKey(Domain, related_name='new_urls')
    new = models.CharField('New Path', max_length=1000)
    class Meta:
        verbose_name = 'Redirect'
        verbose_name_plural = 'Redirects'
        ordering = ('old_domain', 'old')
    def __unicode__(self):
        return "%s/%s Redirect" % (self.old_domain.name, self.old)
class Ignore404(models.Model):
    """
    Patterns to ignore in 404s

    A 404 is ignored when its path matches the prefix, suffix, or regexp
    criteria below (blank criteria are unused).
    """
    domain = models.ForeignKey(Domain, related_name='ignores')
    # Human-readable label for this ignore rule.
    name = models.CharField(max_length=100)
    starts_with = models.CharField(
        max_length=100,
        blank=True,
        help_text='Ignore if path begins with this.',
    )
    ends_with = models.CharField(
        max_length=200,
        blank=True,
        help_text='Ignore if path ends with this.',
    )
    pattern = models.CharField(
        'Pattern',
        max_length=500,
        blank=True,
        help_text='Ignore if path matches this regexp pattern',
    )
    class Meta:
        verbose_name = 'Ignore 404'
        verbose_name_plural = 'Ignore 404s'
        ordering = ('domain', 'name')
    def __unicode__(self):
        return "%s Ignore for %s" % (self.name, self.domain.name)
class Seen404(models.Model):
    """
    Model to store a count of 404s that are not redirected or ignored
    """
    domain = models.ForeignKey(Domain, related_name='seen_404s')
    path = models.CharField(max_length=1000, db_index=True)
    # How many times this (domain, path) 404 has been seen.
    count = models.IntegerField(default=1, db_index=True)
    first_seen = models.DateTimeField(default=timezone.now)
    last_seen = models.DateTimeField(default=timezone.now)
    class Meta:
        verbose_name = 'Seen 404'
        verbose_name_plural = 'Seen 404s'
        ordering = ('count', 'domain', 'path')
    def __unicode__(self):
        return "Seen 404 for %s/%s" % (self.domain.name, self.path)
    def increment(self, count=1):
        """ Increment the seen count """
        # F() expression: the increment happens in the database, avoiding
        # lost updates from concurrent read-modify-write cycles.
        self.count = F('count') + count
        self.save()
    def save(self, *args, **kwargs):
        # Refresh last_seen on every save (including increment()).
        self.last_seen = timezone.now()
        super(Seen404, self).save(*args, **kwargs)
| StarcoderdataPython |
5187833 | <filename>src/components/auth/model.py
import mongoengine as me
from flask_wtf import FlaskForm
import wtforms as wf
from wtforms.validators import DataRequired
from flask_login import UserMixin
class LoginForm(FlaskForm):
    """Login form: username and password, both required."""
    username = wf.StringField(
        label="name", validators=[DataRequired("Username is required")]
    )
    # NOTE(review): a wf.PasswordField would mask input in the browser;
    # confirm StringField is intentional.
    password = wf.StringField(
        label="password", validators=[DataRequired("Password is required")]
    )
class RegisterForm(FlaskForm):
    """Registration form: username, password and a public key, all required."""
    username = wf.StringField(
        label="name", validators=[DataRequired("Username is required")]
    )
    # NOTE(review): a wf.PasswordField would mask input in the browser;
    # confirm StringField is intentional.
    password = wf.StringField(
        label="password", validators=[DataRequired("Password is required")]
    )
    publicKey = wf.StringField(
        label="public_key", validators=[DataRequired("Public key is required")]
    )
class TokenBlocklist(me.Document):
    """Revoked JWT tokens, identified by their unique ``jti`` claim."""
    jti = me.StringField()
    created_at = me.DateTimeField()
    # Bug fix: mongoengine's meta dict uses the singular key "collection".
    # The previous "collections" key was silently ignored, so documents
    # were stored under the default collection name instead of "tokens".
    meta = {"collection": "tokens"}
8156747 | <reponame>GlobalFishingWatch/ais-tools<filename>ais-stream/decode.py
import json
import base64
from config import load_config
from ais_tools.aivdm import AIVDM
decoder = AIVDM()
def handle_event(event, context, pubsub_client):
    """Cloud-function entry point: decode one AIS message from Pub/Sub.

    Decodes the base64 payload to JSON, decodes its NMEA sentence, and
    republishes the enriched message to the configured topic.  Any
    failure is logged and re-raised so the platform can retry the event.
    """
    # Keep partial state so the except block can log whatever we got.
    data_in = message_in = None
    try:
        config = load_config()
        data_in = base64.b64decode(event.get('data', u'')).decode('utf-8')
        message_in = json.loads(data_in)
        if 'nmea' not in message_in:
            raise Exception("missing nmea field")
        if 'source' not in message_in:
            message_in['source'] = config['DEFAULT_SOURCE']
        # Note: message_out aliases (and mutates) message_in.
        message_out = message_in
        message_out.update(decoder.safe_decode(message_in['nmea']))
        data_out = json.dumps(message_out).encode("utf-8")
        pubsub_client.publish(config['DECODE_PUBSUB_TOPIC'], data=data_out, source=message_out['source'])
        # safe_decode reports decode problems via an 'error' field rather
        # than raising; the message is published either way.
        if 'error' in message_out:
            print("Message decode failed - {error}".format(**message_out))
        else:
            print("Message decode succeeded")
        print(message_out)
    except Exception as e:
        print(message_in or data_in or '<empty message>')
        print("Message decode failed - {}: {}".format(e.__class__.__name__, str(e)))
        raise
| StarcoderdataPython |
343095 | #!/usr/bin/env python
import rospy
import math
import tf
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from std_msgs.msg import Int32
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 200 # Number of waypoints we will publish. You can change this number
DEBUG_MODE = False
MAX_DECEL = 0.5
STOP_DIST = 5.0
TARGET_SPEED_MPH = 20
class WaypointUpdater(object):
    """Publish the next LOOKAHEAD_WPS waypoints ahead of the car.

    Subscribes to the current pose, the static base waypoints and the
    detected red-light waypoint index; publishes `/final_waypoints` with
    target velocities (cruise speed, or a deceleration ramp that stops
    the car before a red light).
    """

    def __init__(self):
        rospy.init_node('waypoint_updater')

        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb, queue_size=1)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb, queue_size=1)
        rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb, queue_size=1)
        rospy.Subscriber('/obstacle_waypoint', Lane, self.obstacle_cb, queue_size=1)

        self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)

        self.cur_pose = None
        # Fix: self.waypoints was assigned None twice in the original.
        self.waypoints = None
        self.red_light_waypoint = None

        rospy.spin()

    def pose_cb(self, msg):
        """Store the current pose and republish the lookahead waypoints."""
        self.cur_pose = msg.pose
        if self.waypoints is not None:
            self.publish()

    def waypoints_cb(self, lane):
        """Cache the static base waypoints once (they do not change)."""
        if self.waypoints is None:
            self.waypoints = lane.waypoints

    def traffic_cb(self, msg):
        """Store the next red light's waypoint index (-1 when none)."""
        self.red_light_waypoint = msg.data
        rospy.loginfo("Detected light: " + str(msg.data))
        if self.red_light_waypoint > -1:
            self.publish()

    def obstacle_cb(self, msg):
        # No obstacles from simulator used in Capstone project. Thus have not implemented it yet.
        rospy.loginfo('Received obstacle info {}'.format(msg))

    def get_waypoint_velocity(self, waypoint):
        """Return the target linear velocity stored in a waypoint."""
        return waypoint.twist.twist.linear.x

    def set_waypoint_velocity(self, waypoints, waypoint, velocity):
        """Set the target linear velocity of waypoints[waypoint]."""
        waypoints[waypoint].twist.twist.linear.x = velocity

    # Fix: the original defined a second method also named `distance`
    # (taking a waypoint index range); it was shadowed by the definition
    # below and therefore unreachable dead code, so it has been removed.
    def distance(self, p1, p2):
        """Euclidean distance between two 3D points."""
        x, y, z = p1.x - p2.x, p1.y - p2.y, p1.z - p2.z
        return math.sqrt(x*x + y*y + z*z)

    def closest_waypoint(self, pose, waypoints):
        """Index of the waypoint closest to `pose` in the x/y plane."""
        closest_len = 100000
        closest_waypoint = 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2)
        # Fix: iterate the `waypoints` argument; the original ignored it
        # and always used self.waypoints (callers pass self.waypoints, so
        # behavior for existing callers is unchanged).
        for index, waypoint in enumerate(waypoints):
            dist = dl(pose.position, waypoint.pose.pose.position)
            if (dist < closest_len):
                closest_len = dist
                closest_waypoint = index
        return closest_waypoint

    def next_waypoint(self, pose, waypoints):
        """Index of the closest waypoint that lies *ahead* of the car."""
        closest_waypoint = self.closest_waypoint(pose, waypoints)
        map_x = waypoints[closest_waypoint].pose.pose.position.x
        map_y = waypoints[closest_waypoint].pose.pose.position.y
        heading = math.atan2((map_y - pose.position.y), (map_x - pose.position.x))
        quaternion = (pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w)
        _, _, yaw = tf.transformations.euler_from_quaternion(quaternion)
        angle = abs(yaw - heading)
        # If the closest waypoint is behind us (bearing differs from our
        # yaw by more than 45 degrees), advance to the next one.
        if angle > (math.pi / 4):
            closest_waypoint += 1
        return closest_waypoint

    def decelerate(self, waypoints, redlight_index):
        """Cap waypoint velocities so the car stops at `redlight_index`."""
        if len(waypoints) < 1:
            return []
        last = waypoints[redlight_index]
        last.twist.twist.linear.x = 0.
        for index, wp in enumerate(waypoints):
            if index > redlight_index:
                vel = 0
            else:
                # v = sqrt(2*a*d): constant-deceleration speed profile,
                # aiming to stop STOP_DIST metres before the light.
                dist = self.distance(wp.pose.pose.position, last.pose.pose.position)
                dist = max(0, dist - STOP_DIST)
                vel = math.sqrt(2 * MAX_DECEL * dist)
            if vel < 1.:
                vel = 0.
            # Never exceed the waypoint's existing target velocity.
            wp.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
        return waypoints

    def publish(self):
        """Publish the LOOKAHEAD_WPS waypoints ahead of the current pose."""
        if self.cur_pose is not None:
            next_waypoint_index = self.next_waypoint(self.cur_pose, self.waypoints)
            lookahead_waypoints = self.waypoints[next_waypoint_index:next_waypoint_index+LOOKAHEAD_WPS]

            if self.red_light_waypoint is None or self.red_light_waypoint < 0:
                # No red light ahead: cruise at the target speed.
                for i in range(len(lookahead_waypoints) - 1):
                    # convert miles per hour to meters per sec
                    self.set_waypoint_velocity(lookahead_waypoints, i, (TARGET_SPEED_MPH / 2.237))
            else:
                redlight_lookahead_index = max(0, self.red_light_waypoint - next_waypoint_index)
                lookahead_waypoints = self.decelerate(lookahead_waypoints, redlight_lookahead_index)

            if DEBUG_MODE:
                posx = self.waypoints[next_waypoint_index].pose.pose.position.x
                posy = self.waypoints[next_waypoint_index].pose.pose.position.y
                rospy.loginfo("Closest waypoint: [index=%d posx=%f posy=%f]", next_waypoint_index, posx, posy)

            lane = Lane()
            lane.header.frame_id = '/world'
            lane.header.stamp = rospy.Time(0)
            lane.waypoints = lookahead_waypoints
            self.final_waypoints_pub.publish(lane)
if __name__ == '__main__':
    try:
        WaypointUpdater()
    except rospy.ROSInterruptException:
        # Raised by rospy when the node is interrupted (e.g. Ctrl-C).
        rospy.logerr('Could not start waypoint updater node.')
1971695 | <gh_stars>100-1000
import cea.plots.demand
import cea.plots.cache
import plotly.graph_objs as go
from plotly.offline import plot
import pandas as pd
from cea.plots.variable_naming import NAMING, LOGO, COLOR
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
class EnergyDemandDistrictPlot(cea.plots.demand.DemandPlotBase):
    """Implement the energy-use plot: a stacked bar chart of annual
    end-use energy demand (electricity, heating, hot water, cooling)
    per building, sorted by total demand."""
    name = "Energy End-use"
    def __init__(self, project, parameters, cache):
        super(EnergyDemandDistrictPlot, self).__init__(project, parameters, cache)
        # Annual demand columns plotted as stacked bars (MWh/yr).
        self.analysis_fields = ["E_sys_MWhyr",
                                "Qhs_sys_MWhyr", "Qww_sys_MWhyr",
                                "Qcs_sys_MWhyr", 'Qcdata_sys_MWhyr', 'Qcre_sys_MWhyr']
    @property
    def layout(self):
        """Plotly layout: stacked bars, one bar per building."""
        return go.Layout(barmode='stack',
                         yaxis=dict(title='Energy Demand [MWh/yr]'),
                         xaxis=dict(title='Building Name'), showlegend=True)
    def calc_graph(self):
        """Build one Bar trace per demand field, buildings sorted by total.

        Note: mutates self.data in place (adds 'total', sorts, resets index).
        """
        graph = []
        analysis_fields = self.remove_unused_fields(self.data, self.analysis_fields)
        dataframe = self.data
        dataframe['total'] = dataframe[analysis_fields].sum(axis=1)
        dataframe.sort_values(by='total', ascending=False, inplace=True)
        dataframe.reset_index(inplace=True, drop=True)
        for field in analysis_fields:
            y = dataframe[field]
            name = NAMING[field]
            # Hover text: this field's share of the building's total demand.
            total_percent = (y / dataframe['total'] * 100).round(2).values
            total_percent_txt = ["(%.2f %%)" % x for x in total_percent]
            trace = go.Bar(x=dataframe["Name"], y=y, name=name, text=total_percent_txt, orientation='v',
                           marker=dict(color=COLOR[field]))
            graph.append(trace)
        return graph
    def calc_table(self):
        """Summary table: per-field totals, medians and top-3 consumers."""
        data_frame = self.data
        analysis_fields = self.remove_unused_fields(self.data, self.analysis_fields)
        median = data_frame[analysis_fields].median().round(2).tolist()
        total = data_frame[analysis_fields].sum().round(2).tolist()
        total_perc = [str(x) + " (" + str(round(x / sum(total) * 100, 1)) + " %)" for x in total]
        # calculate the names and anchor-consumer columns per field
        anchors = []
        load_names = []
        for field in analysis_fields:
            anchors.append(', '.join(calc_top_three_anchor_loads(data_frame, field)))
            load_names.append(NAMING[field] + ' (' + field.split('_', 1)[0] + ')')
        column_names = ['Load Name', 'Total [MWh/yr]', 'Median [MWh/yr]', 'Top 3 Consumers']
        table_df = pd.DataFrame({'Load Name': load_names + ["Total"],
                                 'Total [MWh/yr]': total_perc + [str(sum(total)) + " (" + str(100) + " %)"],
                                 'Median [MWh/yr]': median + ["-"],
                                 'Top 3 Consumers': anchors + ['-']}, columns=column_names)
        return table_df
def energy_demand_district(data_frame, analysis_fields, title, output_path):
    """Legacy module-level variant of the plot class above: writes a
    standalone HTML file with the stacked-bar graph plus summary table,
    and returns the plotly figure dict."""
    # CALCULATE GRAPH
    traces_graph = calc_graph(analysis_fields, data_frame)
    # CALCULATE TABLE
    traces_table = calc_table(analysis_fields, data_frame)
    # PLOT GRAPH (the table is appended as an extra trace)
    traces_graph.append(traces_table)
    layout = go.Layout(images=LOGO, title=title, barmode='stack',
                       yaxis=dict(title='Energy [MWh/yr]', domain=[0.35, 1]),
                       xaxis=dict(title='Building Name'), showlegend=True)
    fig = go.Figure(data=traces_graph, layout=layout)
    plot(fig, auto_open=False, filename=output_path)
    return {'data': traces_graph, 'layout': layout}
def calc_table(analysis_fields, data_frame):
    """Build a plotly Table trace with per-field total, median and the
    three buildings with the highest demand ("anchor" consumers)."""
    median = data_frame[analysis_fields].median().round(2).tolist()
    total = data_frame[analysis_fields].sum().round(2).tolist()
    # Total formatted as "value (share %)" of the grand total.
    total_perc = [str(x) + " (" + str(round(x / sum(total) * 100, 1)) + " %)" for x in total]
    # collect per-field display name and top-3 consumers
    anchors = []
    load_names = []
    for field in analysis_fields:
        anchors.append(calc_top_three_anchor_loads(data_frame, field))
        load_names.append(NAMING[field] + ' (' + field.split('_', 1)[0] + ')')
    table = go.Table(domain=dict(x=[0, 1.0], y=[0, 0.2]),
                     header=dict(values=['Load Name', 'Total [MWh/yr]', 'Median [MWh/yr]', 'Top 3 Consumers']),
                     cells=dict(values=[load_names, total_perc, median, anchors]))
    return table
def calc_graph(analysis_fields, data):
    """Build one stacked Bar trace per demand field, buildings sorted by
    their total demand (descending).  Adds a 'total' column to *data*."""
    graph = []
    data['total'] = data[analysis_fields].sum(axis=1)
    data = data.sort_values(by='total', ascending=False)
    for field in analysis_fields:
        y = data[field]
        name = NAMING[field]
        # Hover text: this field's share of each building's total demand.
        total_percent = (y / data['total'] * 100).round(2).values
        total_percent_txt = ["(%.2f %%)" % x for x in total_percent]
        trace = go.Bar(x=data["Name"], y=y, name=name, text=total_percent_txt, orientation='v',
                       marker=dict(color=COLOR[field]))
        graph.append(trace)
    return graph
def calc_top_three_anchor_loads(data_frame, field):
    """Return the names of the three buildings with the highest *field* value."""
    ranked = data_frame.sort_values(by=field, ascending=False)
    return ranked[:3].Name.values
def main():
    """Smoke-test the plot: all buildings, first two, and a single one."""
    import cea.config
    import cea.inputlocator
    config = cea.config.Configuration()
    locator = cea.inputlocator.InputLocator(config.scenario)
    # cache = cea.plots.cache.PlotCache(config.project)
    cache = cea.plots.cache.NullPlotCache()
    # buildings=None means "plot the whole district".
    EnergyDemandDistrictPlot(config.project, {'buildings': None,
                                              'scenario-name': config.scenario_name},
                             cache).plot(auto_open=True)
    EnergyDemandDistrictPlot(config.project, {'buildings': locator.get_zone_building_names()[0:2],
                                              'scenario-name': config.scenario_name},
                             cache).plot(auto_open=True)
    EnergyDemandDistrictPlot(config.project, {'buildings': [locator.get_zone_building_names()[0]],
                                              'scenario-name': config.scenario_name},
                             cache).plot(auto_open=True)
| StarcoderdataPython |
6604217 | import numpy as np
import cv2
# Live demo of a perspective (homography) warp on the default webcam feed.
cap = cv2.VideoCapture(0)
while True:
    _, frame = cap.read()
    # Mark the four source corners of the region to be warped.
    cv2.circle(frame, (120, 150), 5, (0, 0, 255), -1)
    cv2.circle(frame, (530, 150), 5, (0, 0, 255), -1)
    cv2.circle(frame, (120, 340), 5, (0, 0, 255), -1)
    cv2.circle(frame, (530, 340), 5, (0, 0, 255), -1)
    # Map the marked source quadrilateral onto a skewed destination quad.
    pts1 = np.float32([[120, 150], [530, 150], [120, 340], [530, 340]])
    pts2 = np.float32([[100, -100], [600, -200], [-100, 150], [500, 500]])
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    result = cv2.warpPerspective(frame, matrix, (800, 600))
    cv2.imshow("Frame", frame)
    cv2.imshow("Perspective transformation", result)
    # Exit on ESC (key code 27).
    key = cv2.waitKey(1)
    if key == 27:
        break
cap.release()
cv2.destroyAllWindows()
| StarcoderdataPython |
11201475 | #python exceptions let you deal with
#unexpected results
# A bare except catches any exception type (including the NameError here).
try:
    print(a)
except:
    print('a is not defined!') #a is not defined, this will print
# there are specific errors to help with cases
try:
    print(a)
except NameError:
    print('a is still not defined!') #also true, and will print
except:
    print('Something else went wrong.')
# This will break our program
# since a is not defined (the crash is the point of this teaching demo)
print(a) | StarcoderdataPython |
353255 | import time
import numpy as np
import tensorflow as tf
from gcn.utils import *
def evaluate(sess, model, features, support, labels, mask, placeholders):
    """Run one evaluation pass; returns (loss, accuracy, wall_seconds).

    NOTE(review): unlike get_soft_prediction_labels below, this does not
    explicitly zero the dropout placeholder -- confirm construct_feed_dict
    supplies a zero default.
    """
    t_test = time.time()
    feed_dict_val = construct_feed_dict(features, support, labels, mask, placeholders)
    outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val)
    return outs_val[0], outs_val[1], (time.time() - t_test)
def get_soft_prediction_labels(sess, model, features, support, labels, mask, placeholders):
    """Return softmax class probabilities for all nodes (inference mode)."""
    feed_dict_val = construct_feed_dict(features, support, labels, mask, placeholders)
    # Disable dropout for inference.
    feed_dict_val.update({placeholders['dropout']: 0})
    return sess.run(tf.nn.softmax(model.outputs), feed_dict=feed_dict_val)
def train_model(FLAGS, sess, model, features, support, y_train, y_val, train_mask, val_mask, placeholders):
    """Train a GCN with early stopping and epoch-averaged predictions.

    After FLAGS.epoch_to_start_collect_weights, softmax predictions are
    accumulated across epochs (a cheap ensemble); the returned hard labels
    are the argmax of that running sum.  Returns
    (accumulated_soft_predictions, hard_label_array).
    """
    cost_val = []
    # Accumulators start at 0 so the first += broadcasts into an array.
    final_pred_soft = 0
    final_pred = 0
    for epoch in range(FLAGS.epochs):
        t = time.time()
        # Construct feed dictionary
        feed_dict = construct_feed_dict(features, support, y_train, train_mask, placeholders)
        feed_dict.update({placeholders['dropout']: FLAGS.dropout})
        # Training step
        outs = sess.run([model.opt_op, model.loss, model.accuracy], feed_dict=feed_dict)
        # Validation
        cost, acc, duration = evaluate(sess, model, features, support, y_val, val_mask, placeholders)
        cost_val.append(cost)
        # Print results
        print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(outs[1]),
              "train_acc=", "{:.5f}".format(outs[2]), "val_loss=", "{:.5f}".format(cost),
              "val_acc=", "{:.5f}".format(acc), "time=", "{:.5f}".format(time.time() - t))
        # Accumulate soft predictions once past the warm-up epochs.
        if epoch > FLAGS.epoch_to_start_collect_weights:
            labels = get_soft_prediction_labels(sess, model, features, support, y_val, val_mask, placeholders)
            final_pred_soft += labels
            final_pred = final_pred_soft.argmax(axis=1)
        # Stop when validation loss exceeds its recent moving average.
        if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]):
            print("Early stopping...")
            break
    return final_pred_soft, final_pred | StarcoderdataPython |
4941358 | <reponame>gunnarvoet/rbrmoored
"""
Library for RBR data processing.
"""
__all__ = ["solo"]
__version__ = "0.1.0"
from . import solo
| StarcoderdataPython |
4942961 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class SuperpowerItem(scrapy.Item):
    """Container for fields scraped by the spider (Scrapy Fields carry
    no type information; values are set at scrape time)."""
    questions = scrapy.Field()
    votes = scrapy.Field() | StarcoderdataPython |
6514412 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-14 15:08
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration altering `post.post_creation` and
    `post.answered`/`out.handler`-style defaults (see dependencies)."""
    dependencies = [
        ('message', '0004_auto_20170814_1754'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='post_creation',
            # NOTE(review): the default is the literal time `makemigrations`
            # was run, not "now at insert time".  If per-post creation time
            # was intended, the model should use auto_now_add=True or a
            # callable default (timezone.now) instead -- confirm.
            field=models.DateTimeField(default=datetime.datetime(2017, 8, 14, 15, 8, 58, 691436, tzinfo=utc)),
        ),
    ]
| StarcoderdataPython |
6560806 | <reponame>mefuller/ARC
import arc.species.conformers
import arc.species.converter
import arc.species.species
import arc.species.xyz_to_2d
from arc.species.species import ARCSpecies
| StarcoderdataPython |
6658509 | <reponame>verenich/time-prediction-benchmark
case_id_col = {}
activity_col = {}
timestamp_col = {}
label_col = {}
pos_label = {}
neg_label = {}
dynamic_cat_cols = {}
static_cat_cols = {}
dynamic_num_cols = {}
static_num_cols = {}
filename = {}
#### BPIC2011 settings ####
dataset = "bpic2011"
filename[dataset] = "logdata/bpic2011.csv"
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
timestamp_col[dataset] = "Complete Timestamp"
label_col[dataset] = "remtime"
pos_label[dataset] = "deviant"
neg_label[dataset] = "regular"
# features for classifier
dynamic_cat_cols[dataset] = ["Activity code", "Producer code", "Section", "Specialism code", "group"]
static_cat_cols[dataset] = ["Diagnosis", "Treatment code", "Diagnosis code", "case Specialism code", "Diagnosis Treatment Combination ID"]
dynamic_num_cols[dataset] = ["Number of executions", "duration", "month", "weekday", "hour"]
static_num_cols[dataset] = ["Age"]
#### BPIC2015 settings ####
for municipality in range(1,6):
dataset = "bpic2015%s"%municipality
filename[dataset] = "logdata/bpic2015_%s.csv"%municipality
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
timestamp_col[dataset] = "Complete Timestamp"
label_col[dataset] = "remtime"
pos_label[dataset] = "deviant"
neg_label[dataset] = "regular"
# features for classifier
dynamic_cat_cols[dataset] = ["Activity", "monitoringResource", "question", "Resource"]
static_cat_cols[dataset] = ["Responsible_actor"]
dynamic_num_cols[dataset] = ["duration", "month", "weekday", "hour"]
static_num_cols[dataset] = ["SUMleges", 'Aanleg (Uitvoeren werk of werkzaamheid)', 'Bouw', 'Brandveilig gebruik (vergunning)', 'Gebiedsbescherming', 'Handelen in strijd met regels RO', 'Inrit/Uitweg', 'Kap', 'Milieu (neutraal wijziging)', 'Milieu (omgevingsvergunning beperkte milieutoets)', 'Milieu (vergunning)', 'Monument', 'Reclame', 'Sloop']
if municipality in [3,5]:
static_num_cols[dataset].append('Flora en Fauna')
if municipality in [1,2,3,5]:
static_num_cols[dataset].append('Brandveilig gebruik (melding)')
static_num_cols[dataset].append('Milieu (melding)')
#### BPIC2017 settings ####
dataset = "bpic2017"
filename[dataset] = "logdata/bpic2017.csv"
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
timestamp_col[dataset] = "Complete Timestamp"
label_col[dataset] = "remtime"
neg_label[dataset] = "regular"
pos_label[dataset] = "deviant"
# features for classifier
dynamic_cat_cols[dataset] = ["Activity", 'Resource', 'Action', 'CreditScore', 'EventOrigin', 'lifecycle:transition']
static_cat_cols[dataset] = ['ApplicationType', 'LoanGoal']
dynamic_num_cols[dataset] = ['FirstWithdrawalAmount', 'MonthlyCost', 'NumberOfTerms', 'OfferedAmount', "duration", "month", "weekday", "hour", "activity_duration"]
static_num_cols[dataset] = ['RequestedAmount']
#### Traffic fines settings ####
dataset = "traffic_fines"
filename[dataset] = "logdata/traffic_fines.csv"
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
timestamp_col[dataset] = "Complete Timestamp"
label_col[dataset] = "remtime"
pos_label[dataset] = "deviant"
neg_label[dataset] = "regular"
# features for classifier
dynamic_cat_cols[dataset] = ["Activity", "Resource", "lastSent", "notificationType", "dismissal"]
static_cat_cols[dataset] = ["article", "vehicleClass"]
dynamic_num_cols[dataset] = ["expense", "duration", "month", "weekday", "hour"]
static_num_cols[dataset] = ["amount", "points"]
#### Sepsis Cases settings ####
dataset = "sepsis"
filename[dataset] = "logdata/sepsis.csv"
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
timestamp_col[dataset] = "Complete Timestamp"
label_col[dataset] = "remtime"
pos_label[dataset] = "regular"
neg_label[dataset] = "deviant"
# features for classifier
dynamic_cat_cols[dataset] = ["Activity", 'Diagnose', 'org:group']
static_cat_cols[dataset] = ['DiagnosticArtAstrup', 'DiagnosticBlood', 'DiagnosticECG',
'DiagnosticIC', 'DiagnosticLacticAcid', 'DiagnosticLiquor',
'DiagnosticOther', 'DiagnosticSputum', 'DiagnosticUrinaryCulture',
'DiagnosticUrinarySediment', 'DiagnosticXthorax', 'DisfuncOrg',
'Hypotensie', 'Hypoxie', 'InfectionSuspected', 'Infusion', 'Oligurie',
'SIRSCritHeartRate', 'SIRSCritLeucos', 'SIRSCritTachypnea',
'SIRSCritTemperature', 'SIRSCriteria2OrMore']
dynamic_num_cols[dataset] = ['CRP', 'LacticAcid', 'Leucocytes', "duration", "month", "weekday", "hour"]
static_num_cols[dataset] = ['Age']
#### BPI2012A settings ####
dataset = "bpic2012a"
filename[dataset] = "logdata/bpic2012a.csv"
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
timestamp_col[dataset] = "Complete Timestamp"
label_col[dataset] = "remtime"
pos_label[dataset] = "regular"
neg_label[dataset] = "deviant"
# features for classifier
dynamic_cat_cols[dataset] = ['activity_name', 'Resource']
static_cat_cols[dataset] = []
dynamic_num_cols[dataset] = ['open_cases','elapsed']
static_num_cols[dataset] = ['AMOUNT_REQ']
#### BPI2012O settings ####
dataset = "bpic2012o"
filename[dataset] = "logdata/bpic2012o.csv"
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
timestamp_col[dataset] = "Complete Timestamp"
label_col[dataset] = "remtime"
pos_label[dataset] = "regular"
neg_label[dataset] = "deviant"
# features for classifier
dynamic_cat_cols[dataset] = ['activity_name', 'Resource']
static_cat_cols[dataset] = []
dynamic_num_cols[dataset] = ['open_cases','elapsed']
static_num_cols[dataset] = ['AMOUNT_REQ']
#### BPI2012W settings ####
dataset = "bpic2012w"
filename[dataset] = "logdata/bpic2012w.csv"
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
timestamp_col[dataset] = "Complete Timestamp"
label_col[dataset] = "remtime"
pos_label[dataset] = "regular"
neg_label[dataset] = "deviant"
# features for classifier
dynamic_cat_cols[dataset] = ['activity_name', 'Resource']
static_cat_cols[dataset] = []
dynamic_num_cols[dataset] = ['open_cases','elapsed','proctime']
static_num_cols[dataset] = ['AMOUNT_REQ']
#### Credit requirements settings ####
dataset = "credit"
filename[dataset] = "logdata/credit.csv"
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
timestamp_col[dataset] = "Complete Timestamp"
label_col[dataset] = "remtime"
pos_label[dataset] = "regular"
neg_label[dataset] = "deviant"
# features for classifier
dynamic_cat_cols[dataset] = ['Activity','weekday','hour']
static_cat_cols[dataset] = []
dynamic_num_cols[dataset] = ['open_cases','timesincecasestart','timesincemidnight','activity_duration']
static_num_cols[dataset] = []
#### helpdesk settings ####
dataset = "helpdesk"
filename[dataset] = "logdata/helpdesk.csv"
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
timestamp_col[dataset] = "Complete Timestamp"
label_col[dataset] = "remtime"
pos_label[dataset] = "regular"
neg_label[dataset] = "deviant"
# features for classifier
dynamic_cat_cols[dataset] = ['activity_name','Resource']
static_cat_cols[dataset] = ["customer", "product", "responsible_section", "seriousness", "service_level", "service_type", "support_section"]
dynamic_num_cols[dataset] = ['open_cases','elapsed']
static_num_cols[dataset] = []
#### hospital billing settings ####
dataset = "hospital"
filename[dataset] = "logdata/hospital.csv"
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
timestamp_col[dataset] = "Complete Timestamp"
label_col[dataset] = "remtime"
pos_label[dataset] = "regular"
neg_label[dataset] = "deviant"
# features for classifier
dynamic_cat_cols[dataset] = ["Activity", "Resource","actOrange","actRed", "blocked", "caseType", "diagnosis", "flagC","flagD", "msgCode", "msgType", "state", "version"]
static_cat_cols[dataset] = ["speciality"]
dynamic_num_cols[dataset] = ["msgCount", "timesincelastevent", "timesincecasestart", "event_nr", "weekday", "hour", "open_cases"]
static_num_cols[dataset] = []
#### minit invoice settings ####
dataset = "invoice"
filename[dataset] = "logdata/invoice.csv"
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
timestamp_col[dataset] = "Complete Timestamp"
label_col[dataset] = "remtime"
pos_label[dataset] = "regular"
neg_label[dataset] = "deviant"
# features for classifier
dynamic_cat_cols[dataset] = ["Activity", "Resource", "ActivityFinalAction", "EventType", "weekday", "hour"]
static_cat_cols[dataset] = ["CostCenter.Code", "Supplier.City", "Supplier.Name", "Supplier.State"]
dynamic_num_cols[dataset] = ["open_cases", "timesincelastevent", "timesincecasestart", "event_nr"]
static_num_cols[dataset] = ["InvoiceTotalAmountWithoutVAT"]
#### production log settings ####
dataset = "production"
filename[dataset] = "logdata/Production_Data.csv"
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
timestamp_col[dataset] = "Complete Timestamp"
label_col[dataset] = "remtime"
pos_label[dataset] = "regular"
neg_label[dataset] = "deviant"
# features for classifier
dynamic_cat_cols[dataset] = ["Activity", "Resource", "Report Type", "Worker ID","weekday"]
static_cat_cols[dataset] = ["Part Desc"]
dynamic_num_cols[dataset] = ["Qty Completed", "Qty for MRB", "activity_duration", "hour", "timesincelastevent", "timesincecasestart", "event_nr", "open_cases"]
static_num_cols[dataset] = ["Work Order Qty"]
| StarcoderdataPython |
200220 | <reponame>empymod/frequency-design
import emg3d
import empymod
import numpy as np
import ipywidgets as widgets
import scipy.interpolate as si
import matplotlib.pyplot as plt
from IPython.display import display
from scipy.signal import find_peaks
# Define all errors we want to catch with the variable-checks and setting of
# default values. This is not perfect, but better than 'except Exception'.
# Tuple of exception types treated as "bad or missing input" when falling
# back to defaults elsewhere in this module.
VariableCatch = (LookupError, AttributeError, ValueError, TypeError, NameError)
# Interactive Frequency Selection
class InteractiveFrequency(emg3d.utils.Fourier):
    """App to create required frequencies for Fourier Transform.

    Extends ``emg3d.utils.Fourier`` with a matplotlib figure and ipywidgets
    controls: the left column shows the frequency-domain response and its
    interpolation error, the right column the time-domain response and its
    error, for a layered model computed with ``empymod.dipole``.
    """

    def __init__(self, src_z, rec_z, depth, res, time, signal=0, ab=11,
                 aniso=None, **kwargs):
        """App to create required frequencies for Fourier Transform.
        No thorough input checks are carried out. Rubbish in, rubbish out.
        See empymod.model.dipole for details regarding the modelling.
        Parameters
        ----------
        src_z, rec_z : floats
            Source and receiver depths and offset. The source is located at
            src=(0, 0, src_z), the receiver at rec=(off, 0, rec_z).
        depth : list
            Absolute layer interfaces z (m); #depth = #res - 1
            (excluding +/- infinity).
        res : array_like
            Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1.
        time : array_like
            Times t (s).
        signal : {0, 1, -1}, optional
            Source signal, default is 0:
                - -1 : Switch-off time-domain response
                - 0 : Impulse time-domain response
                - +1 : Switch-on time-domain response
        ab : int, optional
            Source-receiver configuration, defaults to 11. (See
            empymod.model.dipole for all possibilities.)
        aniso : array_like, optional
            Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res.
            Defaults to ones.
        **kwargs : Optional parameters:
            - ``fmin`` : float
              Initial minimum frequency. Default is 1e-3.
            - ``fmax`` : float
              Initial maximum frequency. Default is 1e1.
            - ``off`` : float
              Initial offset. Default is 5000.
            - ``ft`` : str {'dlf', 'fftlog'}
              Initial Fourier transform method. Default is 'dlf'.
            - ``ftarg`` : dict
              Initial Fourier transform arguments corresponding to ``ft``.
              Default is None.
            - ``pts_per_dec`` : int
              Initial points per decade. Default is 5.
            - ``linlog`` : str {'linear', 'log'}
              Initial display scaling. Default is 'linear'.
            - ``xtfact`` : float
              Factor for linear x-dimension: t_max = xtfact*offset/1000.
            - ``verb`` : int
              Verbosity. Only for debugging purposes.
        """
        # Get initial values or set to default.
        fmin = kwargs.pop('fmin', 1e-3)
        fmax = kwargs.pop('fmax', 1e1)
        off = kwargs.pop('off', 5000)
        ft = kwargs.pop('ft', 'dlf')
        ftarg = kwargs.pop('ftarg', None)
        self.pts_per_dec = kwargs.pop('pts_per_dec', 5)
        self.linlog = kwargs.pop('linlog', 'linear')
        self.xtfact = kwargs.pop('xtfact', 1)
        self.verb = kwargs.pop('verb', 1)
        # Ensure no kwargs left; anything remaining is a typo by the caller.
        if kwargs:
            raise TypeError('Unexpected **kwargs: %r' % kwargs)
        # Collect model from input; passed verbatim to empymod.dipole.
        self.model = {
            'src': [0, 0, src_z],
            'rec': [off, 0, rec_z],
            'depth': depth,
            'res': res,
            'aniso': aniso,
            'ab': ab,
            'verb': self.verb,
        }
        # Initiate a Fourier instance (sets self.time/fmin/fmax/ft/ftarg).
        super().__init__(time, fmin, fmax, signal, ft, ftarg, verb=self.verb)
        # Create the figure.
        self.initiate_figure()

    def initiate_figure(self):
        """Create the figure, its four axes, and the initial plots."""
        # Create figure and all axes
        fig = plt.figure("Interactive frequency selection for the Fourier "
                         "Transform.", figsize=(9, 4))
        plt.subplots_adjust(hspace=0.03, wspace=0.04, bottom=0.15, top=0.9)
        # plt.tight_layout(rect=[0, 0, 1, 0.95])  # Leave space for suptitle.
        # Layout: ax1/ax2 responses (f- and t-domain), ax3/ax4 their errors.
        ax1 = plt.subplot2grid((3, 2), (0, 0), rowspan=2)
        plt.grid('on', alpha=0.4)
        ax2 = plt.subplot2grid((3, 2), (0, 1), rowspan=2)
        plt.grid('on', alpha=0.4)
        ax3 = plt.subplot2grid((3, 2), (2, 0))
        plt.grid('on', alpha=0.4)
        ax4 = plt.subplot2grid((3, 2), (2, 1))
        plt.grid('on', alpha=0.4)
        # Synchronize x-axis, switch upper labels off
        ax1.get_shared_x_axes().join(ax1, ax3)
        ax2.get_shared_x_axes().join(ax2, ax4)
        plt.setp(ax1.get_xticklabels(), visible=False)
        plt.setp(ax2.get_xticklabels(), visible=False)
        # Move labels of t-domain to the right
        ax2.yaxis.set_ticks_position('right')
        ax4.yaxis.set_ticks_position('right')
        # Set fixed limits
        ax1.set_xscale('log')
        # NOTE(review): set_yscale('log') is called twice for ax3 and ax4;
        # the duplicates are harmless but one of each could be removed.
        ax3.set_yscale('log')
        ax3.set_yscale('log')
        ax3.set_ylim([0.007, 141])
        ax3.set_yticks([0.01, 0.1, 1, 10, 100])
        ax3.set_yticklabels(('0.01', '0.1', '1', '10', '100'))
        ax4.set_yscale('log')
        ax4.set_yscale('log')
        ax4.set_ylim([0.007, 141])
        ax4.set_yticks([0.01, 0.1, 1, 10, 100])
        ax4.set_yticklabels(('0.01', '0.1', '1', '10', '100'))
        # Labels etc
        ax1.set_ylabel('Amplitude (V/m)')
        ax3.set_ylabel('Rel. Error (%)')
        ax3.set_xlabel('Frequency (Hz)')
        ax4.set_xlabel('Time (s)')
        # 1 %-error reference lines in both error panels.
        ax3.axhline(1, c='k')
        ax4.axhline(1, c='k')
        # Add instances
        self.fig = fig
        self.axs = [ax1, ax2, ax3, ax4]
        # Plot initial base model
        self.update_ftfilt(self.ftarg)
        self.plot_base_model()
        # Initiate the widgets
        self.create_widget()

    def reim(self, inp):
        """Return real or imaginary part as a function of signal.

        Switch-off responses (signal < 0) use the real part, impulse and
        switch-on responses use the imaginary part.
        """
        if self.signal < 0:
            return inp.real
        else:
            return inp.imag

    def create_widget(self):
        """Create widgets and their layout."""
        # Offset slider.
        off = widgets.interactive(
            self.update_off,
            off=widgets.IntSlider(
                min=500,
                max=10000,
                description='Offset (m)',
                value=self.model['rec'][0],
                step=250,
                continuous_update=False,
                style={'description_width': '60px'},
                layout={'width': '260px'},
            ),
        )
        # Pts/dec slider.
        pts_per_dec = widgets.interactive(
            self.update_pts_per_dec,
            pts_per_dec=widgets.IntSlider(
                min=1,
                max=10,
                description='pts/dec',
                value=self.pts_per_dec,
                step=1,
                continuous_update=False,
                style={'description_width': '60px'},
                layout={'width': '260px'},
            ),
        )
        # Linear/logarithmic selection.
        linlog = widgets.interactive(
            self.update_linlog,
            linlog=widgets.ToggleButtons(
                value=self.linlog,
                options=['linear', 'log'],
                description='Display',
                style={'description_width': '60px', 'button_width': '100px'},
            ),
        )
        # Frequency-range slider (values are log10 of the frequencies).
        freq_range = widgets.interactive(
            self.update_freq_range,
            freq_range=widgets.FloatRangeSlider(
                value=[np.log10(self.fmin), np.log10(self.fmax)],
                description='f-range',
                min=-4,
                max=3,
                step=0.1,
                continuous_update=False,
                style={'description_width': '60px'},
                layout={'width': '260px'},
            ),
        )
        # Signal selection (-1, 0, 1).
        signal = widgets.interactive(
            self.update_signal,
            signal=widgets.ToggleButtons(
                value=self.signal,
                options=[-1, 0, 1],
                description='Signal',
                style={'description_width': '60px', 'button_width': '65px'},
            ),
        )
        # Fourier transform method selection.
        def _get_init():
            """Return initial choice of Fourier Transform."""
            if self.ft == 'fftlog':
                return self.ft
            else:
                return self.ftarg['dlf'].savename
        ftfilt = widgets.interactive(
            self.update_ftfilt,
            ftfilt=widgets.Dropdown(
                options=['fftlog', 'key_81_CosSin_2009',
                         'key_241_CosSin_2009', 'key_601_CosSin_2009',
                         'key_101_CosSin_2012', 'key_201_CosSin_2012'],
                description='Fourier',
                value=_get_init(),  # Initial value
                style={'description_width': '60px'},
                layout={'width': 'max-content'},
            ),
        )
        # Group them together.
        t1col1 = widgets.VBox(children=[pts_per_dec, freq_range],
                              layout={'width': '310px'})
        t1col2 = widgets.VBox(children=[off, ftfilt],
                              layout={'width': '310px'})
        t1col3 = widgets.VBox(children=[signal, linlog],
                              layout={'width': '310px'})
        # Group them together.
        display(widgets.HBox(children=[t1col1, t1col2, t1col3]))

    # Plotting and calculation routines.
    def clear_handle(self, handles):
        """Clear `handles` from figure.

        `handles` are short names; the corresponding artists are stored on
        self as attributes prefixed with 'h_'. Missing handles are skipped.
        """
        for hndl in handles:
            if hasattr(self, 'h_'+hndl):
                getattr(self, 'h_'+hndl).remove()

    def adjust_lim(self):
        """Adjust axes limits to the current responses and display scale."""
        # Adjust y-limits f-domain
        if self.linlog == 'linear':
            self.axs[0].set_ylim([1.1*min(self.reim(self.f_dense)),
                                  1.5*max(self.reim(self.f_dense))])
        else:
            self.axs[0].set_ylim([5*min(self.reim(self.f_dense)),
                                  5*max(self.reim(self.f_dense))])
        # Adjust x-limits f-domain
        self.axs[0].set_xlim([min(self.freq_req), max(self.freq_req)])
        # Adjust y-limits t-domain
        if self.linlog == 'linear':
            self.axs[1].set_ylim(
                    [min(-max(self.t_base)/20, 0.9*min(self.t_base)),
                     max(-min(self.t_base)/20, 1.1*max(self.t_base))])
        else:
            self.axs[1].set_ylim([10**(np.log10(max(self.t_base))-5),
                                  1.5*max(self.t_base)])
        # Adjust x-limits t-domain
        if self.linlog == 'linear':
            if self.signal == 0:
                # Impulse response decays fast: scale t_max with offset.
                self.axs[1].set_xlim(
                        [0, self.xtfact*self.model['rec'][0]/1000])
            else:
                self.axs[1].set_xlim([0, max(self.time)])
        else:
            self.axs[1].set_xlim([min(self.time), max(self.time)])

    def print_suptitle(self):
        """Update suptitle."""
        plt.suptitle(
            f"Offset = {np.squeeze(self.model['rec'][0])/1000} km; "
            f"No. freq. coarse: {self.freq_calc.size}; No. freq. full: "
            f"{self.freq_req.size} ({self.freq_req.min():.1e} $-$ "
            f"{self.freq_req.max():.1e} Hz)")

    def plot_base_model(self):
        """Update smooth, 'correct' model (densely sampled reference)."""
        # Calculate responses
        self.f_dense = empymod.dipole(freqtime=self.freq_dense, **self.model)
        self.t_base = empymod.dipole(
                freqtime=self.time, signal=self.signal, **self.model)
        # Clear existing handles
        self.clear_handle(['f_base', 't_base'])
        # Plot new result
        self.h_f_base, = self.axs[0].plot(
                self.freq_dense, self.reim(self.f_dense), 'C3')
        self.h_t_base, = self.axs[1].plot(self.time, self.t_base, 'C3')
        self.adjust_lim()

    def plot_coarse_model(self):
        """Update coarse model (calculated at the selected frequencies)."""
        # Calculate the f-responses for required and the calculation range.
        f_req = empymod.dipole(freqtime=self.freq_req, **self.model)
        f_calc = empymod.dipole(freqtime=self.freq_calc, **self.model)
        # Interpolate from calculated to required frequencies and transform.
        f_int = self.interpolate(f_calc)
        t_int = self.freq2time(f_calc, self.model['rec'][0])
        # Calculate the errors, clipped to the [0.01, 100] % display range.
        f_error = np.clip(100*abs((self.reim(f_int)-self.reim(f_req)) /
                                  self.reim(f_req)), 0.01, 100)
        t_error = np.clip(100*abs((t_int-self.t_base)/self.t_base), 0.01, 100)
        # Clear existing handles
        self.clear_handle(['f_int', 't_int', 'f_inti', 'f_inte', 't_inte'])
        # Plot frequency-domain result
        self.h_f_inti, = self.axs[0].plot(
                self.freq_req, self.reim(f_int), 'k.', ms=4)
        self.h_f_int, = self.axs[0].plot(
                self.freq_calc, self.reim(f_calc), 'C0.', ms=8)
        self.h_f_inte, = self.axs[2].plot(self.freq_req, f_error, 'k.')
        # Plot time-domain result
        self.h_t_int, = self.axs[1].plot(self.time, t_int, 'k--')
        self.h_t_inte, = self.axs[3].plot(self.time, t_error, 'k.')
        # Update suptitle
        self.print_suptitle()

    # Interactive routines
    def update_off(self, off):
        """Offset-slider"""
        # Update model
        self.model['rec'] = [off, self.model['rec'][1], self.model['rec'][2]]
        # Redraw models
        self.plot_base_model()
        self.plot_coarse_model()

    def update_pts_per_dec(self, pts_per_dec):
        """pts_per_dec-slider."""
        # Store pts_per_dec.
        self.pts_per_dec = pts_per_dec
        # Redraw through update_ftfilt.
        self.update_ftfilt(self.ftarg)

    def update_freq_range(self, freq_range):
        """Freq-range slider (receives log10-values)."""
        # Update values
        self.fmin = 10**freq_range[0]
        self.fmax = 10**freq_range[1]
        # Redraw models
        self.plot_coarse_model()

    def update_ftfilt(self, ftfilt):
        """Ftfilt dropdown.

        Accepts either a string from the dropdown ('fftlog' or a DLF filter
        name) or an existing ftarg-dict (when called internally on redraw).
        """
        # Check if FFTLog or DLF; get DLF filter.
        if isinstance(ftfilt, str):
            fftlog = ftfilt == 'fftlog'
        else:
            if 'dlf' in ftfilt:
                fftlog = False
                ftfilt = ftfilt['dlf'].savename
            else:
                fftlog = True
        # Update Fourier arguments.
        if fftlog:
            self.fourier_arguments('fftlog', {'pts_per_dec': self.pts_per_dec})
            self.freq_inp = None
        else:
            # Calculate input frequency from min to max with pts_per_dec.
            lmin = np.log10(self.freq_req.min())
            lmax = np.log10(self.freq_req.max())
            self.freq_inp = np.logspace(
                    lmin, lmax, int(self.pts_per_dec*np.ceil(lmax-lmin)))
            self.fourier_arguments(
                    'dlf', {'dlf': ftfilt, 'pts_per_dec': -1})
        # Dense frequencies for comparison reasons
        self.freq_dense = np.logspace(np.log10(self.freq_req.min()),
                                      np.log10(self.freq_req.max()), 301)
        # Redraw models
        self.plot_base_model()
        self.plot_coarse_model()

    def update_linlog(self, linlog):
        """Adjust x- and y-scaling of both frequency- and time-domain."""
        # Store linlog
        self.linlog = linlog
        # f-domain: x-axis always log; y-axis linear or symlog.
        if linlog == 'log':
            sym_dec = 10  # Number of decades to show on symlog
            lty = int(max(np.log10(abs(self.reim(self.f_dense))))-sym_dec)
            # NOTE(review): mixes new-style 'linthresh' with old-style
            # 'linscaley' -- newer matplotlib expects 'linscale'; confirm
            # against the pinned matplotlib version.
            self.axs[0].set_yscale('symlog', linthresh=10**lty, linscaley=0.7)
            # Remove the zero line because of the overlapping ticklabels.
            nticks = len(self.axs[0].get_yticks())//2
            iticks = np.arange(nticks)
            iticks = np.r_[iticks, iticks+nticks+1]
            self.axs[0].set_yticks(self.axs[0].get_yticks()[iticks])
        else:
            self.axs[0].set_yscale(linlog)
        # t-domain: either linear or loglog
        self.axs[1].set_yscale(linlog)
        self.axs[1].set_xscale(linlog)
        # Adjust limits
        self.adjust_lim()

    def update_signal(self, signal):
        """Signal toggle (-1, 0, 1)."""
        # Store signal.
        self.signal = signal
        # Redraw through update_ftfilt.
        self.update_ftfilt(self.ftarg)
# Routines for the Adaptive Frequency Selection
def get_new_freq(freq, field, rtol, req_freq=None, full_output=False):
    r"""Suggest the next frequency to calculate for an adaptive scheme.

    A frequency is considered stable when its leave-one-out interpolated
    field fulfills

    .. math::

        \frac{\Im(E_x - E_x^\rm{int})}{\max|E_x|} < rtol .

    Two regimes: (1) while the lowest frequency is unstable, extend the range
    downwards by half a log10-decade; (2) otherwise move upwards, inserting a
    frequency midway (log10-scale) to the next one, or half a decade above
    the last one. Only the imaginary part enters the interpolation; sentinel
    zero-fields are appended at 1e-100, 1e4, and 1e100 Hz, and the
    interpolation is a piecewise cubic Hermite (pchip).

    Parameters
    ----------
    freq : ndarray
        Current frequencies (at least two).
    field : ndarray
        E-field corresponding to current frequencies.
    rtol : float
        Tolerance, to decide if the field is stable around a given frequency.
    req_freq : ndarray
        Frequencies of a pre-calculated model for comparison in the plots.
    full_output : bool
        If True, also return the evaluation data.

    Returns
    -------
    new_freq : float
        New frequency to be calculated. If ``full_output=True``, a tuple
        whose first entry is new_freq.
    """
    n = freq.size

    def _padded(f_vals, s_vals):
        """Append sentinel zero-fields; 1e4 only if below current maximum."""
        if max(freq) < 1e4:
            return np.r_[1e-100, f_vals, 1e4, 1e100], np.r_[0, s_vals, 0, 0]
        return np.r_[1e-100, f_vals, 1e100], np.r_[0, s_vals, 0]

    # Leave-one-out interpolation of the imaginary part at each frequency.
    interp = np.zeros_like(field)
    for k in range(n):
        keep = np.arange(n) != k
        base_f, base_s = _padded(freq[keep], field[keep])
        interp[k] = 1j*si.pchip_interpolate(base_f, base_s.imag, freq[k])

    # Full interpolation over the required range, if provided (for plotting).
    if req_freq is not None:
        full_f, full_s = _padded(freq, field)
        interp_full = 1j*si.pchip_interpolate(full_f, full_s.imag, req_freq)

    # Relative error with respect to the overall field maximum.
    err = np.abs((interp.imag - field.imag)/max(np.abs(field)))

    # Indices failing the tolerance; pick the new frequency accordingly.
    bad = np.arange(n)[err > rtol]
    if bad.size == 0:
        new_freq = np.array([])
    else:
        lfreq = np.log10(freq)
        gaps = np.diff(lfreq)
        first = bad[0]
        if err[0] > rtol:
            # Lowest frequency unstable: extend half a decade downwards.
            target = lfreq[first] - 0.5
        elif err[-1] > rtol and bad.size == 1:
            # Only the highest frequency unstable: half a decade upwards.
            target = lfreq[first] + 0.5
        else:
            # Interior frequency: insert midway (log10) to its neighbour.
            target = lfreq[first] + gaps[first]/2
        new_freq = 10**np.array([target])

    if not full_output:
        return new_freq
    if req_freq is not None:
        return (new_freq, interp, err, bad, interp_full)
    return (new_freq, interp, err, bad)
def design_freq_range(time, model, rtol, signal, freq_range, xlim_freq=None,
                      ylim_freq=None, xlim_lin=None, ylim_lin=None,
                      xlim_log=None, ylim_log=None, pause=0.1):
    """GUI to design required frequencies for Fourier transform.

    Iteratively calls ``get_new_freq`` until all frequencies are stable,
    updating a six-panel figure (f-domain, linear t-domain, log t-domain,
    each with an error panel) after every added frequency. Returns the
    time-domain signal plus the frequencies and fields actually calculated.
    """
    # Get required frequencies for provided time and ft, verbose.
    time, req_freq, ft, ftarg = empymod.utils.check_time(
            time=time, signal=signal, ft=model.get('ft', 'dlf'),
            ftarg=model.get('ftarg', {}), verb=3
    )
    req_freq, ri = np.unique(req_freq, return_inverse=True)
    # Use empymod-utilities to print frequency range.
    mod = empymod.utils.check_model(
            [], 1, None, None, None, None, None, False, 0)
    _ = empymod.utils.check_frequency(req_freq, *mod[1:-1], 3)
    # Calculate "good" f- and t-domain field (high-accuracy reference with
    # the full lagged-convolution DLF, pts_per_dec=-1).
    fine_model = model.copy()
    for key in ['ht', 'htarg', 'ft', 'ftarg']:
        if key in fine_model:
            del fine_model[key]
    fine_model['ht'] = 'dlf'
    fine_model['htarg'] = {'pts_per_dec': -1}
    fine_model['ft'] = 'dlf'
    fine_model['ftarg'] = {'pts_per_dec': -1}
    sfEM = empymod.dipole(freqtime=req_freq, **fine_model)
    stEM = empymod.dipole(freqtime=time, signal=signal, **fine_model)
    # Define initial frequencies: explicit log-range (tuple), explicit array,
    # or from the first `freq_range` peaks of the imaginary reference field.
    if isinstance(freq_range, tuple):
        new_freq = np.logspace(*freq_range)
    elif isinstance(freq_range, np.ndarray):
        new_freq = freq_range
    else:
        p, _ = find_peaks(np.abs(sfEM.imag))
        # Get first n peaks.
        new_freq = req_freq[p[:freq_range]]
        # Add midpoints, plus one before.
        lfreq = np.log10(new_freq)
        new_freq = 10**np.unique(np.r_[lfreq, lfreq[:-1]+np.diff(lfreq),
                                       lfreq[0]-np.diff(lfreq[:2])])
    # Start figure and print current number of frequencies.
    fig, axs = plt.subplots(2, 3, figsize=(9, 8))
    fig.h_sup = plt.suptitle("Number of frequencies: --.", y=1, fontsize=14)
    # Subplot 1: Actual signals.
    axs[0, 0].set_title(r'Im($E_x$)')
    axs[0, 0].set_xlabel('Frequency (Hz)')
    axs[0, 0].set_ylabel(r'$E_x$ (V/m)')
    axs[0, 0].set_xscale('log')
    axs[0, 0].get_shared_x_axes().join(axs[0, 0], axs[1, 0])
    if xlim_freq is not None:
        axs[0, 0].set_xlim(xlim_freq)
    else:
        axs[0, 0].set_xlim([min(req_freq), max(req_freq)])
    if ylim_freq is not None:
        axs[0, 0].set_ylim(ylim_freq)
    axs[0, 0].plot(req_freq, sfEM.imag, 'k')
    # Subplot 2: Error.
    axs[1, 0].set_title(r'$|\Im(E_x-E^{\rm{int}}_x)/\max|E_x||$')
    axs[1, 0].set_xlabel('Frequency (Hz)')
    axs[1, 0].set_ylabel('Relative error (%)')
    axs[1, 0].axhline(100*rtol, c='k')  # Tolerance of error-level.
    axs[1, 0].set_yscale('log')
    axs[1, 0].set_xscale('log')
    axs[1, 0].set_ylim([1e-2, 1e2])
    # Subplot 3: Linear t-domain model.
    axs[0, 1].set_xlabel('Time (s)')
    axs[0, 1].get_shared_x_axes().join(axs[0, 1], axs[1, 1])
    if xlim_lin is not None:
        axs[0, 1].set_xlim(xlim_lin)
    else:
        axs[0, 1].set_xlim([min(time), max(time)])
    if ylim_lin is not None:
        axs[0, 1].set_ylim(ylim_lin)
    else:
        axs[0, 1].set_ylim(
                [min(-max(stEM)/20, 0.9*min(stEM)),
                 max(-min(stEM)/20, 1.1*max(stEM))])
    axs[0, 1].plot(time, stEM, 'k-', lw=1)
    # Subplot 4: Error linear t-domain model.
    axs[1, 1].set_title('Error')
    axs[1, 1].set_xlabel('Time (s)')
    axs[1, 1].axhline(100*rtol, c='k')
    axs[1, 1].set_yscale('log')
    axs[1, 1].set_ylim([1e-2, 1e2])
    # Subplot 5: Logarithmic t-domain model.
    axs[0, 2].set_xlabel('Time (s)')
    axs[0, 2].set_xscale('log')
    axs[0, 2].set_yscale('log')
    axs[0, 2].get_shared_x_axes().join(axs[0, 2], axs[1, 2])
    if xlim_log is not None:
        axs[0, 2].set_xlim(xlim_log)
    else:
        axs[0, 2].set_xlim([min(time), max(time)])
    if ylim_log is not None:
        axs[0, 2].set_ylim(ylim_log)
    axs[0, 2].plot(time, stEM, 'k-', lw=1)
    # Subplot 6: Error logarithmic t-domain model.
    axs[1, 2].set_title('Error')
    axs[1, 2].set_xlabel('Time (s)')
    axs[1, 2].axhline(100*rtol, c='k')
    axs[1, 2].set_yscale('log')
    axs[1, 2].set_xscale('log')
    axs[1, 2].set_ylim([1e-2, 1e2])
    plt.tight_layout()
    fig.canvas.draw()
    plt.pause(pause)
    # Pre-allocate arrays.
    freq = np.array([], dtype=float)
    fEM = np.array([], dtype=complex)
    # Loop until satisfied (get_new_freq returns an empty array).
    while len(new_freq) > 0:
        # Calculate fEM for new frequencies.
        new_fEM = empymod.dipole(freqtime=new_freq, **model)
        # Combine existing and new frequencies and fEM.
        freq, ai = np.unique(np.r_[freq, new_freq], return_index=True)
        fEM = np.r_[fEM, new_fEM][ai]
        # Check if more frequencies are required.
        out = get_new_freq(freq, fEM, rtol, req_freq, True)
        new_freq = out[0]
        # Calculate corresponding time-domain signal.
        # 1. Interpolation to required frequencies
        #    Slightly different for real and imaginary parts.
        # 3-point ramp from last frequency, step-size is diff. btw last two
        # freqs.
        lfreq = np.log10(freq)
        freq_ramp = 10**(np.ones(3)*lfreq[-1] +
                         np.arange(1, 4)*np.diff(lfreq[-2:]))
        fEM_ramp = np.array([0.75, 0.5, 0.25])*fEM[-1]
        # Imag: Add ramp and also 0-fields at +/-1e-100.
        itmp_f = np.r_[1e-100, freq, freq_ramp, 1e100]
        itmp_s = np.r_[0, fEM.imag, fEM_ramp.imag, 0]
        isfEM = si.pchip_interpolate(itmp_f, itmp_s, req_freq)
        # Real: Add ramp and also 0-fields at +1e-100 (not at -1e-100).
        rtmp_f = np.r_[freq, freq_ramp, 1e100]
        rtmp_s = np.r_[fEM.real, fEM_ramp.real, 0]
        rsfEM = si.pchip_interpolate(rtmp_f, rtmp_s, req_freq)
        # Combine
        sfEM = rsfEM + 1j*isfEM
        # Re-arrange req_freq and sfEM if ri is provided.
        # NOTE(review): `ri` comes from np.unique(..., return_inverse=True)
        # and is therefore never None, so this branch always runs; moreover
        # the permutation is re-applied on every loop iteration, which looks
        # unintended after the first pass -- verify against empymod's
        # expected frequency ordering.
        if ri is not None:
            req_freq = req_freq[ri]
            sfEM = sfEM[ri]
        # 2. Carry out the actual Fourier transform.
        #    (without checking for QWE convergence.)
        tEM, _ = empymod.model.tem(
                sfEM[:, None], np.atleast_1d(model['rec'][0]), freq=req_freq,
                time=time, signal=signal, ft=ft, ftarg=ftarg)
        # Reshape and return (single source and receiver assumed).
        nrec, nsrc = 1, 1
        tEM = np.squeeze(tEM.reshape((-1, nrec, nsrc), order='F'))
        # Clean up old lines before updating plots.
        names = ['tlin', 'tlog', 'elin', 'elog', 'if2', 'err', 'erd', 'err1',
                 'erd1']
        for name in names:
            if hasattr(fig, 'h_'+name):
                getattr(fig, 'h_'+name).remove()
        # Adjust number of frequencies.
        fig.h_sup = plt.suptitle(f"Number of frequencies: {freq.size}.",
                                 y=1, fontsize=14)
        # Plot the interpolated points.
        error_bars = [fEM.imag-out[1].imag, fEM.imag*0]
        fig.h_err = axs[0, 0].errorbar(
                freq, fEM.imag, yerr=error_bars, fmt='.', ms=8, color='k',
                ecolor='C0', label='Calc. points')
        # Plot the error.
        fig.h_erd, = axs[1, 0].plot(freq, 100*out[2], 'C0o', ms=6)
        # Make frequency under consideration blue.
        ierr = out[3]
        if len(ierr) > 0:
            iierr = ierr[0]
            fig.h_err1, = axs[0, 0].plot(freq[iierr], out[1][iierr].imag,
                                         'bo', ms=6)
            fig.h_erd1, = axs[1, 0].plot(freq[iierr], 100*out[2][iierr],
                                         'bo', ms=6)
        # Plot complete interpolation.
        fig.h_if2, = axs[0, 0].plot(req_freq, out[4].imag, 'C0--')
        # Plot current time domain result and error.
        fig.h_tlin, = axs[0, 1].plot(time, tEM, 'C0-')
        fig.h_tlog, = axs[0, 2].plot(time, tEM, 'C0-')
        fig.h_elin, = axs[1, 1].plot(time, 100*abs((tEM-stEM)/stEM), 'r--')
        fig.h_elog, = axs[1, 2].plot(time, 100*abs((tEM-stEM)/stEM), 'r--')
        plt.tight_layout()
        fig.canvas.draw()
        plt.pause(pause)
    # Return time-domain signal (correspond to provided times); also
    # return used frequencies and corresponding signal.
    return tEM, freq, fEM
| StarcoderdataPython |
3490776 | <reponame>Erotemic/hotspotter
from numpy import asarray, percentile, uint8, uint16
import numpy as np
from hotspotter.other.logger import logdbg, logwarn
from PIL import Image, ImageOps
'''
from skimage.util.dtype import dtype_range
from skimage import exposure
from skimage.morphology import disk
from numpy import asarray, percentile, uint8, uint16
from matplotlib.pyplot import *
cm = hs.cm
fig = figure(42)
raw_chip = cm.cx2_raw_chip(4)
img = raw_chip
pil_raw = Image.fromarray( raw_chip ).convert('L')
pil_filt = am.resize_chip(pil_raw, am.preproc.sqrt_num_pxls)
img = asarray(pil_filt)
imshow(img)
fig.show()
'''
# http://scikit-image.org/docs/dev/api/skimage.filter.html#denoise-bilateral
# How can I hold all of these algorithms?!
def contrast_stretch(img):
    '''Stretch image contrast to the 2nd-98th intensity percentile range.

    Clipping to percentiles (instead of min/max) keeps a few extreme pixels
    from dominating the rescaling. Returns the rescaled array (same dtype
    behavior as skimage.exposure.rescale_intensity).
    '''
    from skimage import exposure
    # Contrast stretching between the 2nd and 98th percentiles.
    p2 = percentile(img, 2)
    p98 = percentile(img, 98)
    img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))
    return img_rescale
def histeq(pil_img):
    '''Global histogram equalization of a PIL image.

    Uses skimage.exposure.equalize_hist when available; otherwise falls back
    to the bundled imtools implementation. Returns an 8-bit ('L') PIL image.
    '''
    img = asarray(pil_img)
    try:
        from skimage import exposure
        # equalize_hist returns float64 in [0, 1]; scale back to uint8.
        img_eq_float64 = exposure.equalize_hist(img)
        return Image.fromarray(uint8(np.round(img_eq_float64*255)))
    except Exception as ex:
        # Broad catch is deliberate: any skimage failure (including
        # ImportError) triggers the fallback path.
        from hotspotter.tpl.other import imtools
        logdbg('Scikits not found: %s' % str(ex))
        logdbg('Using fallback histeq')
        return Image.fromarray(imtools.histeq(img)).convert('L')
def adapt_histeq(img):
    '''Adaptive (CLAHE) histogram equalization of a uint8 image.

    Upscales to uint16 for skimage.exposure.equalize_adapthist, crops a
    5-pixel border, and returns a uint8 image. If skimage is unavailable,
    the input is returned unchanged.
    '''
    try:
        from skimage import exposure
        # input uint8, output uint16
        img_uint8 = img
        img_uint16 = uint16(img)*2**8
        img_adapteq_uint16 = exposure.equalize_adapthist(img_uint16,\
                                                         ntiles_x=8,\
                                                         ntiles_y=8,\
                                                         clip_limit=0.01,\
                                                         nbins=256)
        # Crop a 5-px border on BOTH axes. (The original sliced rows twice:
        # [5:-5][5:-5] removed 10 rows per side and no columns.)
        img_adapteq_cropped_uint8 = uint8(img_adapteq_uint16[5:-5, 5:-5] / uint16(2)**8 )
        return img_adapteq_cropped_uint8
    except Exception as ex:
        logdbg('Scikits not found: %s' % str(ex))
        logwarn('Scikits not found: %s' % str(ex))
        return img
def bilateral_filter(img):
    '''Edge-preserving bilateral denoising of a uint8 image.

    Returns a float image in [0, 1]; if skimage is unavailable, the input
    is returned unchanged.
    '''
    try:
        import skimage.filter
        img_uint8 = img
        # BUGFIX: `float32` was used bare but only asarray/percentile/uint8/
        # uint16 are imported from numpy -- use the np module instead.
        img_float = np.float32(img_uint8) / 255
        # mode = Points outside the boundaries of the input are filled according to the given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default is 'constant'.
        # NOTE(review): cval='reflect' is only read for mode='constant', so it
        # is ignored here -- but a numeric cval would be cleaner; confirm.
        img_bilat = skimage.filter.denoise_bilateral(img_float, win_size=20, sigma_range=1.6, sigma_spatial=1.6, bins=256, mode='reflect', cval='reflect')
        return img_bilat
    except Exception as ex:
        logdbg('Scikits not found: %s' % str(ex))
        logwarn('Scikits not found: %s' % str(ex))
        return img
def segment(img):
    '''Segment an image. Placeholder -- not implemented.

    Intended to wrap skimage.segmentation.felzenszwalb.
    '''
    # The original had an unreachable import of a misspelled module
    # ("scimage") after the raise; removed as dead code.
    raise NotImplementedError
def superpixels(img):
    '''Compute superpixels of an image. Placeholder -- not implemented.

    Intended to wrap skimage.segmentation.slic.
    '''
    # The original had an unreachable import of a misspelled module
    # ("scimage") after the raise; removed as dead code.
    raise NotImplementedError
def skelatonize(img):
    '''Skeletonize an image. Placeholder -- not implemented.

    Intended to wrap skimage.morphology.skeletonize. (Name misspelling is
    kept for backward compatibility with existing callers.)
    '''
    # The original had an unreachable import of a misspelled module
    # ("scimage") after the raise; removed as dead code.
    raise NotImplementedError
| StarcoderdataPython |
1674006 | <gh_stars>10-100
# Basic training configuration file
from torch.optim import SGD
from torch.optim.lr_scheduler import MultiStepLR, ReduceLROnPlateau
from torchvision.transforms import RandomVerticalFlip, RandomHorizontalFlip, RandomCrop
from torchvision.transforms import ColorJitter, ToTensor
from models.small_nasnet_a_mobile import SmallNASNetAMobile
from transforms import GlobalContrastNormalize
# --- training run configuration ---
# Random seed for reproducibility.
SEED = 12345
DEBUG = True
OUTPUT_PATH = "output"
DATASET_PATH = "/home/fast_storage/tiny-imagenet-200/"
# Which of the n_splits cross-validation folds to use for this run.
TRAINVAL_SPLIT = {
    'fold_index': 0,
    'n_splits': 7
}
# Tiny-ImageNet has 200 classes.
MODEL = SmallNASNetAMobile(num_classes=200)
N_EPOCHS = 100
BATCH_SIZE = 128
VAL_BATCH_SIZE = 100
NUM_WORKERS = 8
OPTIM = SGD(MODEL.parameters(), lr=0.1)
# LR_SCHEDULERS = [
#     MultiStepLR(OPTIM, milestones=[55, 70, 80, 90, 100], gamma=0.5)
# ]
TRAIN_TRANSFORMS = [
    RandomCrop(size=64, padding=10),
    RandomHorizontalFlip(p=0.5),
    RandomVerticalFlip(p=0.5),
    ColorJitter(hue=0.1, brightness=0.1),
    ToTensor(),
    # https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/scripts/datasets/make_cifar10_gcn_whitened.py#L19
    GlobalContrastNormalize(scale=55.0)
]
# NOTE(review): random flips at validation time make the validation metric
# noisy (a form of test-time augmentation without averaging) -- confirm this
# is intentional.
VAL_TRANSFORMS = [
    RandomHorizontalFlip(p=0.5),
    RandomVerticalFlip(p=0.5),
    ToTensor(),
    # https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/scripts/datasets/make_cifar10_gcn_whitened.py#L19
    GlobalContrastNormalize(scale=55.0)
]
# Halve-on-plateau scheduling of the validation loss ('min' mode).
REDUCE_LR_ON_PLATEAU = ReduceLROnPlateau(OPTIM, mode='min', factor=0.1, patience=10, verbose=True)
EARLY_STOPPING_KWARGS = {
    'patience': 30,
    # 'score_function': None
}
# Log training metrics every LOG_INTERVAL iterations.
LOG_INTERVAL = 100
| StarcoderdataPython |
381333 | """Utility to train keras models.
----
Copyright 2019 Data Driven Empathy LLC
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import keras
import sklearn.utils
import numpy
import wandb
import wandb.keras
class Trainer:
    """Abstract base for strategies that train keras models."""

    def train(self, data_frame, model, epochs=None):
        """Train the given keras model on article data.

        Args:
            data_frame: The data frame from which article data should be read.
            model: The keras.models.Model to be trained.
            epochs: The number of epochs over which the model should be
                trained.

        Raises:
            NotImplementedError: Always; subclasses must override.
        """
        raise NotImplementedError('Must use implementor.')
class OccurrenceTrainer(Trainer):
    """Trains a feed-forward network on word occurrence / co-occurrence vectors."""

    def __init__(self, train_vector_col, source_id_vector_col, source_col, fox_weight,
            epochs=30, set_assignment_col='set_assignment', use_wandb=True):
        """Create a new occurrence training strategy.

        Args:
            train_vector_col: The name of the column with the input vector.
            source_id_vector_col: The name of the column with the output.
            source_col: The column with the name of the news source.
            fox_weight: The amount of fox resampling from 0 (no resampling)
                to 1 (every article resampled once).
            epochs: The number of epochs for which the model should be
                trained. Defaults to 30.
            set_assignment_col: The column indicating in which set (train,
                test, validation) an instance is assigned. Defaults to
                set_assignment.
            use_wandb: Flag indicating if results should be reported to W&B.
                Defaults to True.
        """
        self.__train_vector_col = train_vector_col
        self.__source_id_vector_col = source_id_vector_col
        self.__source_col = source_col
        self.__fox_weight = fox_weight
        self.__epochs = epochs
        self.__set_assignment_col = set_assignment_col
        self.__use_wandb = use_wandb

    def train(self, data_frame, model):
        """Fit the model on occurrence vectors drawn from data_frame.

        Args:
            data_frame: The data frame from which article data should be read.
            model: The keras.models.Model to be trained.
        """
        in_col = self.__train_vector_col
        out_col = self.__source_id_vector_col

        train_rows = data_frame[data_frame[self.__set_assignment_col] == 'train']
        fox_rows = data_frame[data_frame[self.__source_col] == 'Fox']

        # Number of Fox articles to resample into the training set.
        extra = round(self.__fox_weight * fox_rows.shape[0])

        x_parts = [numpy.array(train_rows[in_col].tolist())]
        y_parts = [numpy.array(train_rows[out_col].tolist())]
        if self.__fox_weight > 0:
            # Oversample Fox by appending a prefix of its articles.
            x_parts.append(numpy.array(fox_rows[in_col].tolist())[:extra])
            y_parts.append(numpy.array(fox_rows[out_col].tolist())[:extra])
            x_train = numpy.concatenate(x_parts)
            y_train = numpy.concatenate(y_parts)
        else:
            x_train = x_parts[0]
            y_train = y_parts[0]

        valid_rows = data_frame[data_frame[self.__set_assignment_col] == 'validation']
        x_valid = numpy.array(valid_rows[in_col].tolist())
        y_valid = numpy.array(valid_rows[out_col].tolist())

        callbacks = [wandb.keras.WandbCallback()] if self.__use_wandb else []

        model.fit(
            x_train,
            y_train,
            epochs=self.__epochs,
            validation_data=(x_valid, y_valid),
            callbacks=callbacks
        )
class SequenceTrainer(Trainer):
    """Strategy to train an LSTM (sequence) network."""

    def __init__(self, train_vector_col, source_id_vector_col, source_col, max_seq_len,
                 fox_weight, epochs=30, set_assignment_col='set_assignment', use_wandb=True):
        """Create a new LSTM training strategy.

        Args:
            train_vector_col: The name of the column with the input vector.
            source_id_vector_col: The name of the column with the output.
            source_col: The column with the name of the news source.
            max_seq_len: Length to which input sequences are padded/truncated.
            fox_weight: The amount of fox resampling from 0 (no resampling) to 1 (every article
                resampled once).
            epochs: The number of epochs for which the model should be trained. Defaults to 30.
            set_assignment_col: The column indicating in which set (train, test, validation) an
                instance is assigned. Defaults to set_assignment.
            use_wandb: Flag indicating if results should be reported to W&B. Defaults to True.
        """
        # Fix: the original assigned set_assignment_col twice; once suffices.
        self.__set_assignment_col = set_assignment_col
        self.__train_vector_col = train_vector_col
        self.__source_id_vector_col = source_id_vector_col
        self.__source_col = source_col
        self.__max_seq_len = max_seq_len
        self.__fox_weight = fox_weight
        self.__epochs = epochs
        self.__use_wandb = use_wandb

    def train(self, data_frame, model):
        """Train a sequence model on the 'train' split with padded inputs.

        Args:
            data_frame: The data frame from which article data should be read.
            model: The keras.models.Model to be trained.
        """
        training_data = data_frame[data_frame[self.__set_assignment_col] == 'train']
        training_data_fox = data_frame[data_frame[self.__source_col] == 'Fox']
        x_train_reg = numpy.array(training_data[self.__train_vector_col].tolist())
        y_train_reg = numpy.array(training_data[self.__source_id_vector_col].tolist())
        # Over-sample Fox articles: append the first fox_weight fraction again.
        x_train_fox = numpy.array(training_data_fox[self.__train_vector_col].tolist())
        x_train_fox = x_train_fox[:round(self.__fox_weight * training_data_fox.shape[0])]
        y_train_fox = numpy.array(training_data_fox[self.__source_id_vector_col].tolist())
        y_train_fox = y_train_fox[:round(self.__fox_weight * training_data_fox.shape[0])]
        if self.__fox_weight > 0:
            x_train = numpy.concatenate((x_train_reg, x_train_fox))
            y_train = numpy.concatenate((y_train_reg, y_train_fox))
        else:
            x_train = x_train_reg
            y_train = y_train_reg
        # Pad/truncate variable-length sequences to a fixed length.
        x_train_pad = keras.preprocessing.sequence.pad_sequences(
            x_train,
            maxlen=self.__max_seq_len
        )
        training_data_valid = data_frame[data_frame[self.__set_assignment_col] == 'validation']
        x_train_valid = numpy.array(training_data_valid[self.__train_vector_col].tolist())
        y_train_valid = numpy.array(training_data_valid[self.__source_id_vector_col].tolist())
        x_train_pad_valid = keras.preprocessing.sequence.pad_sequences(
            x_train_valid,
            maxlen=self.__max_seq_len
        )
        callbacks = []
        if self.__use_wandb:
            callbacks.append(wandb.keras.WandbCallback())
        model.fit(
            x_train_pad,
            y_train,
            epochs=self.__epochs,
            validation_data=(x_train_pad_valid, y_train_valid),
            callbacks=callbacks
        )
| StarcoderdataPython |
3478434 | from django.db import models
class Log(models.Model):
    """Audit record of an API-key request."""

    # Requester identity.
    name = models.CharField(max_length=500)
    country = models.CharField(max_length=500)
    email = models.CharField(max_length=500)
    # NOTE(review): the request date is stored as a free-form string, not a
    # DateField — confirm the format against the code that writes it.
    requestdate = models.CharField(max_length=500)
    requestdeadline = models.IntegerField()
    # Granted keys: a serialized collection plus a display string.
    apikeys = models.CharField(max_length=5000)
    apikeysstring = models.CharField(max_length=5000)
    # Flags stored as integers rather than BooleanField.
    expiring = models.IntegerField()
    expired = models.IntegerField()
1758159 | <reponame>ToucanToco/vue-query-builder
from typing import Literal
from pydantic import validator
from weaverbird.pipeline.steps.utils.base import BaseStep
class SimplifyStep(BaseStep):
    """Pipeline step that simplifies geometries within a distance tolerance."""

    name: Literal['simplify'] = 'simplify'
    # All parts of the simplified geometry will be no more than this distance
    # from the original.
    tolerance: float = 1.0

    @validator('tolerance')
    def _tolerance_validator(cls, value: float) -> float:
        # Reject zero/negative tolerances; pydantic surfaces the assertion
        # message as a validation error.
        assert value > 0, "tolerance must be strictly positive"
        return value
| StarcoderdataPython |
397410 | #!/usr/bin/env python
import argparse
import functools
import io
import logging
import os
import re
import tarfile
import tempfile
from ruamel.yaml import YAML
from ruamel.yaml.scalarstring import LiteralScalarString
# Maximum size of the generated output unless --large is given.
MAX_OUTPUT_SIZE = 24 * 1024

# Error codes returned by generate()/main().
ERR_SUCCESS = 0
ERR_NOT_FOUND = 1
ERR_ALREADY_EXISTS = 2
ERR_FILE_TOO_LARGE = 3

# Template keys that reference local files / directories to be inlined.
LOCAL_CONTENT_PATH = "local-content-path"
LOCAL_CONTENT_TAR_PATH = "local-content-tar-path"


def read_file(path):
    """
    Read all content of file.
    :param path: path to file
    :return: (content bytes, error code); (b"", ERR_NOT_FOUND) when missing
    """
    if os.path.exists(path):
        with open(path, "rb") as f:
            return f.read(), ERR_SUCCESS
    logging.error("File {} not found".format(path))
    # Fix: return bytes on failure for consistency with the binary read
    # above; the original returned a str "" here.
    return b"", ERR_NOT_FOUND
def is_ascii(content):
    """Return True when every byte of *content* is in the 7-bit ASCII range."""
    return all(byte < 128 for byte in content)
# Tar filter that normalizes ownership and permission bits on each member.
def tarinfo_filter(owner, permissions, tarinfo):
    """Apply *owner* and octal *permissions* (a string) to *tarinfo* and
    return it, as expected by tarfile's ``filter=`` hook."""
    if owner == "root":
        tarinfo.uid = 0
        tarinfo.gid = 0
        tarinfo.uname = tarinfo.gname = owner
    elif owner:
        # Only the textual user name is known here (no uid/gid lookup);
        # should we assume gname == uname?
        tarinfo.uname = owner
    if permissions:
        # Note: this will replace permissions on files and folders.
        # Keep the high (non-permission) bits, swap in the requested bits.
        # Possibly add support for adding executable bit to folders?
        requested = int(permissions, 8)
        tarinfo.mode = (tarinfo.mode & int("7777000", 8)) | requested
    return tarinfo
def generate(template, output, content_root, subst, force=False, large=False):
    """
    Generate yaml file from template
    :param template: path to template file
    :param output: output file to write to
    :param content_root: path root folder for content files
    :param subst: dictionary of substitution variables
    :param force: overwrite output file even if it exists
    :param large: support large (>24k) output files
    :return: one of the ERR_* codes (ERR_SUCCESS on success)
    """
    template_content, err = read_file(template)
    if err:
        return err
    yaml = YAML()
    code = yaml.load(template_content)
    # Inline the local file / directory content each write_files entry refers to.
    for f in code.get("write_files"):
        if LOCAL_CONTENT_PATH in f:
            path = os.path.join(content_root, f.get(LOCAL_CONTENT_PATH))
            content, err = read_file(path)
            if err:
                return err
            del f[LOCAL_CONTENT_PATH]
            if is_ascii(content):
                # A literal scalar keeps ASCII text readable in the output yaml.
                f["content"] = LiteralScalarString(content.decode('ascii'))
            else:
                f["content"] = content
        elif LOCAL_CONTENT_TAR_PATH in f:
            owner = f.get("owner", "")
            permissions = f.get("permissions", "")
            path = os.path.join(content_root, f.get(LOCAL_CONTENT_TAR_PATH))
            # Pack the directory into a gzipped tar, fixing ownership and
            # permission bits on the way in via tarinfo_filter.
            _, tgz_path = tempfile.mkstemp(".tgz")
            tgz = tarfile.open(tgz_path, "w:gz")
            tgz.add(path,
                    arcname=".",
                    recursive=True,
                    filter=functools.partial(tarinfo_filter, owner, permissions))
            tgz.close()
            with open(tgz_path, "rb") as tgz_file:
                tgz_content = tgz_file.read()
            os.unlink(tgz_path)
            del f[LOCAL_CONTENT_TAR_PATH]
            f["content"] = tgz_content
    if os.path.exists(output) and not force:
        logging.error("Output file already exists. Remove or specify --force")
        return ERR_ALREADY_EXISTS
    result_file = io.StringIO()
    yaml.dump(code, result_file)
    result_file.seek(0)
    result = result_file.read()
    # Remove comments (lines tagged with "#@").
    result = re.sub("\s*#@\s.*$", "", result, flags=re.MULTILINE)
    # Substitutions: replace @@KEY@@ placeholders, in sorted key order.
    for s in sorted(subst.keys()):
        result = result.replace("@@" + s + "@@", subst[s])
    # Check size against the 24 KiB cap unless --large was given.
    if (not large) and (len(result) > MAX_OUTPUT_SIZE):
        logging.error("Output file too large!")
        return ERR_FILE_TOO_LARGE
    with open(output, "w") as f:
        f.write(result)
    logging.info("Generated {} successfully.".format(output))
    return ERR_SUCCESS
def main(args=None):
    """
    Command-line entry point: parse options, collect @@KEY@@ substitutions,
    and run generate().
    :param args: argument list (for testing); defaults to sys.argv[1:]
    :return: error code from generate() (ERR_SUCCESS on success)
    """
    parser = argparse.ArgumentParser(description="Generate yaml file from template ")
    parser.add_argument("-v", "--verbose", action="count", default=0,
                        help="Increase output verbosity")
    parser.add_argument("-c", "--content-root", default=".",
                        help="Root folder for local-content-path files")
    parser.add_argument("-t", "--template", default="template.yaml",
                        help="Template file to read")
    parser.add_argument("-o", "--output", default="generated.yaml",
                        help="Output file to write to")
    parser.add_argument("-f", "--force", default=False, action="store_true",
                        help="Overwrite output file even if exists")
    parser.add_argument("-xl", "--large", default=False, action="store_true",
                        help="Allow large (>24Kb) template output")
    parser.add_argument("substitution", nargs="*",
                        help="key=value pairs for @@KEY@@ substitutions")
    parsed_args = parser.parse_args(args=args)
    # -v enables INFO, -vv (or more) enables DEBUG.
    if parsed_args.verbose > 1:
        log_level = logging.DEBUG
    elif parsed_args.verbose:
        log_level = logging.INFO
    else:
        log_level = logging.WARNING
    logging.basicConfig(level=log_level, format="%(message)s")
    subst = {}
    for s in parsed_args.substitution:
        split = s.split("=", 1)
        if len(split) < 2 or not split[0].isupper():
            # parser.error() prints the message and raises SystemExit itself,
            # so the explicit exit(1) the original placed after this call was
            # unreachable and has been removed.
            parser.error("substitution \"{}\" format error - correct is: \"UPPERCASE=some value\"".format(s))
        subst[split[0]] = split[1]
    for k in sorted(subst.keys()):
        logging.info("substituting {}=\"{}\"".format(k, subst[k]))
    return generate(
        template=parsed_args.template,
        output=parsed_args.output,
        content_root=parsed_args.content_root,
        subst=subst,
        force=parsed_args.force,
        large=parsed_args.large)


if __name__ == "__main__":
    exit(main())
| StarcoderdataPython |
3335796 | <filename>epab/exc.py
# coding=utf-8
"""
EPAB's exceptions
"""
class ExecutableNotFoundError(FileNotFoundError):
    """
    Raised when a required executable cannot be found.
    """
| StarcoderdataPython |
11384852 | from time import sleep
from src.utils.common.clear import clear
from src.views.common.PersonDetail import printPerson
from src.views.common.exitOrHome import exitOrHome
from src.views.components.SearchBy import searchByEmail
from src.views.components.SearchBy import searchByLastname
from src.views.components.SearchBy import searchByName
from src.views.components.SearchBy import searchByPhone
from src.views.components.SelectEdit import SelectEdit
# Maps each menu key to its search handler; "b" backs out of the menu.
switch = {
    "n": searchByName,
    "l": searchByLastname,
    "e": searchByEmail,
    "p": searchByPhone,
    "b": exitOrHome
}
def PersonSearch():
    """Prompt for a search criterion, dispatch to the matching handler, and
    pass the hits on to the edit menu."""
    clear()
    print("""
    > Searching a person
    So we are going to help you look for someone in detail
    and maybe edit some of their data (or delete them if you want to)
    Here is how you can search them:
    N .- Name
    L .- Last Name
    E .- Email (if it has one)
    P .- Phone
    B .- Back
    """)
    how = str(input("What's your move?: ")).strip().lower()
    if how not in switch:
        print("Wrong option")
        sleep(1)
        PersonSearch()
        # Fix: without this return, control fell through after the retry
        # and switch[how] below raised KeyError on the invalid key.
        return
    if how == "b":
        switch[how]()
        return
    persons = switch[how](PersonSearch)
    SelectEdit(persons)
96400 | <filename>bin/state.py<gh_stars>0
#!/usr/bin/env python
import re
from energy import *
if __name__ == "__main__":
    ph = 0.0
    eh = 0.0
    # Optional pairwise-energy print cutoff taken from the command line.
    if len(sys.argv) > 1:
        cutoff = float(sys.argv[1])
    else:
        cutoff = PW_PRINT_CUT
    lines = open("states").readlines()
    # First line holds comma-separated "KEY=value" Monte-Carlo parameters,
    # e.g. "T=..., PH=..., EH=...".
    line = lines.pop(0)
    fields = line.strip().split(",")
    mc_parm = {}
    for field in fields:
        key, value = field.split("=")
        key = key.upper()
        mc_parm[key.strip()] = float(value)
    T = mc_parm["T"]  # parsed but not used below
    ph = mc_parm["PH"]
    eh = mc_parm["EH"]
    # Each remaining line is one microstate: a list of conformer indices.
    for line in lines:
        state = [int(x) for x in re.findall(r"[\w']+", line)]
        analyze_state_energy(state, ph=ph, eh=eh, cutoff=cutoff)
    # for conf in head3lst:
    #     conf.printme()
| StarcoderdataPython |
3312695 | <gh_stars>0
#!/usr/bin/python3
import pytest
import logging
import matplotlib.pyplot as plt
import simtools.io_handler as io
from simtools.model.telescope_model import TelescopeModel
from simtools.camera_efficiency import CameraEfficiency
from simtools.util.tests import (
has_db_connection,
simtel_installed,
DB_CONNECTION_MSG,
SIMTEL_MSG,
)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)


# Requires both a database connection and a sim_telarray installation.
@pytest.mark.skipif(not has_db_connection(), reason=DB_CONNECTION_MSG)
@pytest.mark.skipif(not simtel_installed(), reason=SIMTEL_MSG)
def test_main():
    """End-to-end camera efficiency run for LST-1; saves diagnostic plots."""
    label = "test_camera_eff"
    tel = TelescopeModel(
        site="North",
        telescopeModelName="LST-1",
        label=label
    )
    ce = CameraEfficiency(telescopeModel=tel)
    ce.simulate(force=True)
    ce.analyze(force=True)
    # Plotting Cherenkov
    plt.figure(figsize=(8, 6), tight_layout=True)
    ax = plt.gca()
    ax.set_xlabel("wl")
    ax.set_ylabel("eff")
    ce.plot("cherenkov")
    plotFileCherenkov = io.getTestPlotFile("camera_eff_cherenkov.pdf")
    plt.savefig(plotFileCherenkov)
    # Plotting NSB (log scale)
    plt.figure(figsize=(8, 6), tight_layout=True)
    ax = plt.gca()
    ax.set_yscale("log")
    ax.set_xlabel("wl")
    ax.set_ylabel("eff")
    ce.plot("nsb")
    plotFileNSB = io.getTestPlotFile("camera_eff_nsb.pdf")
    plt.savefig(plotFileNSB)


if __name__ == "__main__":
    test_main()
| StarcoderdataPython |
23852 | #!/usr/bin/env python
import argparse
import os
import platform
import re
import shutil
import subprocess
import sys
# Python versions this checker knows how to verify and fix.
SUPPORTED_VERSIONS = ('3.6', '3.7')

# Platform detection used to pick the right package-manager suggestions.
IS_DEBIAN = platform.system() == 'Linux' and os.path.exists('/etc/debian_version')
IS_OLD_UBUNTU = (IS_DEBIAN and os.path.exists('/etc/lsb-release')
                 and re.search('RELEASE=1[46]', open('/etc/lsb-release').read()))
IS_MACOS = platform.system() == 'Darwin'

# Prefix suggested commands with sudo only when not already running as root.
SUDO = 'sudo ' if os.getuid() else ''

parser = argparse.ArgumentParser(description='Check and fix Python installation')
parser.add_argument('--autofix', action='store_true', help='Automatically fix any problems found')
parser.add_argument('--version', default=SUPPORTED_VERSIONS[0], choices=SUPPORTED_VERSIONS,
                    help='Python version to check')
args = parser.parse_args()

PY_VERSION = args.version
AUTOFIX = args.autofix
def check_sudo():
    """Abort with an instruction unless the sudo binary is on PATH."""
    sudo_path = run('which sudo', return_output=True)
    if sudo_path:
        return
    error('! sudo is not installed.')
    print(' Please ask an administrator to install it and run this again.')
    sys.exit(1)
def check_apt():
    """Make apt non-interactive and ensure apt-utils is present."""
    os.environ['DEBIAN_FRONTEND'] = 'noninteractive'
    run(SUDO + 'apt-get install -y apt-utils', return_output=True)
def check_curl():
    """Ensure the curl binary is available; suggest apt install on Debian."""
    if not run('which curl', return_output=True):
        error('! curl is not installed.')
        if IS_DEBIAN:
            raise AutoFixSuggestion('To install, run', SUDO + 'apt-get install -y curl')
        sys.exit(1)
def check_python():
    """Verify python<PY_VERSION> is on PATH; suggest install commands if not."""
    py3_path = run('which python' + PY_VERSION, return_output=True)
    if not py3_path:
        error('! Python ' + PY_VERSION + ' is not installed.')
        if '--version' not in sys.argv:
            print(' autopip supports Python {}.'.format(', '.join(SUPPORTED_VERSIONS))
                  + ' To check a different version, re-run using "python - --version x.y"')
        if IS_OLD_UBUNTU:
            # Older Ubuntu releases need the deadsnakes PPA for newer Pythons.
            raise AutoFixSuggestion('To install, run',
                                    (SUDO + 'apt-get update',
                                     SUDO + 'apt-get install -y software-properties-common',
                                     SUDO + 'add-apt-repository -y ppa:deadsnakes/ppa',
                                     SUDO + 'apt-get update',
                                     SUDO + 'apt-get install -y python' + PY_VERSION))
        elif IS_DEBIAN:
            raise AutoFixSuggestion('To install, run',
                                    (SUDO + 'apt-get update', SUDO + 'apt-get install -y python' + PY_VERSION))
        elif IS_MACOS:
            raise AutoFixSuggestion('To install, run', 'brew install python')
        print(' Please install Python ' + PY_VERSION
              + ' per http://docs.python-guide.org/en/latest/starting/installation/')
        sys.exit(1)
def check_pip():
    """Check pip3 exists, belongs to PY_VERSION, and is at least 9.0.3."""
    if not run('which pip3', return_output=True):
        error('! pip3 is not installed.')
        if IS_DEBIAN:
            raise AutoFixSuggestion('To install, run', SUDO + 'apt-get install -y python3-pip')
        elif IS_MACOS:
            raise AutoFixSuggestion('To install, run', 'curl -s https://bootstrap.pypa.io/get-pip.py | '
                                    + SUDO + 'python' + PY_VERSION)
        print(' If your package repo has a *-pip package for Python ' + PY_VERSION
              + ', then installing it from there is recommended.')
        print(' To install directly, run: curl -s https://bootstrap.pypa.io/get-pip.py | '
              + SUDO + 'python' + PY_VERSION)
        sys.exit(1)
    version_full = run('pip3 --version', return_output=True)
    # pip3 must be bound to the Python version being checked.
    if 'python ' + PY_VERSION not in version_full:
        print(' ' + version_full.strip())
        error('! pip3 is pointing to another Python version and not Python ' + PY_VERSION)
        if '--version' not in sys.argv:
            print(' autopip supports Python {}.'.format(', '.join(SUPPORTED_VERSIONS))
                  + ' To check a different version, re-run using "python - --version x.y"')
        raise AutoFixSuggestion('To re-install for Python ' + PY_VERSION + ', run',
                                'curl -s https://bootstrap.pypa.io/get-pip.py | ' + SUDO + 'python' + PY_VERSION)
    # e.g. "pip 9.0.3 from ..." -> ("9", "0", "3") -> (9, 0, 3)
    version_str = version_full.split()[1]
    version = tuple(map(_int_or, version_str.split('.', 2)))
    if version < (9, 0, 3):
        error('! Version is', version_str + ', but should be 9.0.3+')
        raise AutoFixSuggestion('To upgrade, run', SUDO + 'pip3 install pip==9.0.3')
def check_venv():
    """Confirm both the venv module and virtualenv can create environments."""
    test_venv_path = '/tmp/check-python-venv-{}'.format(os.getpid())
    # First: the stdlib venv module.
    try:
        try:
            run('python' + PY_VERSION + ' -m venv ' + test_venv_path, stderr=subprocess.STDOUT, return_output=True,
                raises=True)
        except Exception:
            error('! Could not create virtual environment.')
            if IS_DEBIAN:
                raise AutoFixSuggestion('To install, run', SUDO + 'apt-get install -y python' + PY_VERSION + '-venv')
            print(' Please make sure Python venv package is installed.')
            sys.exit(1)
    finally:
        # Always clean up the scratch environment.
        shutil.rmtree(test_venv_path, ignore_errors=True)
    # Second: the virtualenv tool.
    try:
        try:
            run('virtualenv --python python' + PY_VERSION + ' ' + test_venv_path, stderr=subprocess.STDOUT,
                return_output=True,
                raises=True)
        except Exception as e:
            if run('which virtualenv', return_output=True):
                # virtualenv is installed but failed for some other reason.
                error('! Could not create virtual environment.')
                print(' ' + str(e))
                sys.exit(1)
            else:
                error('! virtualenv is not installed.')
                raise AutoFixSuggestion('To install, run', SUDO + 'pip3 install virtualenv')
    finally:
        shutil.rmtree(test_venv_path, ignore_errors=True)
def check_setuptools():
    """Check setuptools is installed and at least version 39."""
    try:
        version_str = run('python' + PY_VERSION + ' -m easy_install --version', return_output=True, raises=True)
    except Exception:
        error('! setuptools is not installed.')
        raise AutoFixSuggestion('To install, run', SUDO + 'pip3 install setuptools')
    # e.g. "setuptools 39.0.1 ..." -> (39, 0, 1)
    version_str = version_str.split()[1]
    version = tuple(map(_int_or, version_str.split('.')))
    if version < (39,):
        error('! Version is', version_str + ', but should be 39+')
        raise AutoFixSuggestion('To upgrade, run', SUDO + 'pip3 install -U setuptools')
def check_wheel():
    """Check the wheel package is installed and at least version 0.31."""
    try:
        version_str = run('python' + PY_VERSION + ' -m wheel version ', return_output=True, raises=True)
    except Exception:
        error('! wheel is not installed.')
        raise AutoFixSuggestion('To install, run', SUDO + 'pip3 install wheel')
    # e.g. "wheel 0.31.1" -> (0, 31, 1)
    version_str = version_str.split()[1]
    version = tuple(map(_int_or, version_str.split('.')))
    if version < (0, 31):
        error('! Version is', version_str + ', but should be 0.31+')
        raise AutoFixSuggestion('To upgrade, run', SUDO + 'pip3 install -U wheel')
def check_python_dev():
    """Verify Python.h exists, i.e. the pythonX.Y dev headers are installed."""
    include_path = run('python' + PY_VERSION
                       + ' -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())"',
                       return_output=True)
    if not include_path:
        error('! Failed to get Python include path, so not sure if Python dev package is installed')
        if IS_DEBIAN:
            raise AutoFixSuggestion('To install, run', SUDO + ' apt-get install -y python' + PY_VERSION + '-dev')
        sys.exit(1)
    python_h = os.path.join(include_path.strip(), 'Python.h')
    if not os.path.exists(python_h):
        error('! Python dev package is not installed as', python_h, 'does not exist')
        if IS_DEBIAN:
            raise AutoFixSuggestion('To install, run', SUDO + 'apt-get install -y python' + PY_VERSION + '-dev')
        sys.exit(1)
def run(cmd, return_output=False, raises=False, **kwargs):
    """Echo and execute *cmd*.

    Commands containing quotes or pipes run through the shell; plain strings
    are split into argv lists. Returns decoded output when *return_output*
    is set; failures then return None unless *raises* is set.
    """
    print('+ ' + str(cmd))
    if '"' in cmd or '|' in cmd:
        kwargs['shell'] = True
    elif isinstance(cmd, str):
        cmd = cmd.split()
    runner = subprocess.check_output if return_output else subprocess.check_call
    try:
        result = runner(cmd, **kwargs)
    except Exception:
        if raises or not return_output:
            raise
        return None
    if isinstance(result, bytes):
        result = result.decode('utf-8')
    return result
def _int_or(value):
try:
return int(value)
except Exception:
return value
def error(*msg):
    """Print an error line; red when checking interactively (not autofixing)."""
    text = ' '.join(str(part) for part in msg)
    color = None if AUTOFIX else 'red'
    echo(text, color=color)
def echo(msg, color=None):
    """Print *msg*, ANSI-colored when stdout is a terminal and color given."""
    codes = {'red': '\033[0;31m', 'green': '\033[92m'}
    if color and sys.stdout.isatty():
        # Unknown color names pass through unchanged (allows raw escapes).
        msg = codes.get(color, color) + msg + '\033[0m'
    print(msg)
class AutoFixSuggestion(Exception):
    """Raised by a check to carry a suggested fix; executed under --autofix."""

    def __init__(self, instruction, cmd):
        super(AutoFixSuggestion, self).__init__(instruction)
        # cmd may be a single command string or a tuple of command strings.
        self.cmd = cmd
# Checks to run, in order; prerequisites for autofixing are prepended so
# they run first. (NOTE(review): the nesting of the three insert guards was
# reconstructed — confirm check_apt/check_sudo only apply under --autofix.)
checks = [check_python, check_pip, check_venv, check_setuptools, check_wheel, check_python_dev]
if AUTOFIX:
    checks.insert(0, check_curl)
    if IS_DEBIAN:
        checks.insert(0, check_apt)
    if SUDO:
        checks.insert(0, check_sudo)

try:
    last_fix = None
    for check in checks:
        print('Checking ' + check.__name__.split('_', 1)[1].replace('_', ' '))
        # Re-run each check after applying its fix until it passes.
        while True:
            try:
                check()
                break
            except AutoFixSuggestion as e:
                cmds = e.cmd if isinstance(e.cmd, tuple) else (e.cmd,)
                if AUTOFIX:
                    # Bail out if the same fix was already tried and the
                    # check still fails.
                    if cmds == last_fix:
                        error('! Failed to fix automatically, so you gotta fix it yourself.')
                        sys.exit(1)
                    else:
                        for cmd in cmds:
                            run(cmd, return_output=True, raises=True)
                        last_fix = cmds
                else:
                    print(' ' + str(e) + ': ' + ' && '.join(cmds) + '\n')
                    print('# Run the above suggested command(s) manually and then re-run to continue checking,')
                    print(' or re-run using "python - --autofix" to run all suggested commands automatically.')
                    sys.exit(1)
        print('')
except Exception as e:
    error('!', str(e))
    sys.exit(1)
except KeyboardInterrupt:
    sys.exit(1)

echo('Python is alive and well. Good job!', color='green')
| StarcoderdataPython |
1856676 | <filename>heapSort.py
'''
Heap Sort Implementation
Input: [2,5,3,8,6,5,4,7]
Output: [2,3,4,5,5,6,7,8]
'''
def heapify(ls, n, i):
    """Sift ls[i] down so the subtree rooted at i satisfies the max-heap
    property, considering only the first n elements of ls."""
    largest = i
    l = 2 * i + 1
    r = 2 * i + 2
    if l < n and ls[i] < ls[l]:
        largest = l
    if r < n and ls[largest] < ls[r]:
        largest = r
    if largest != i:
        ls[i], ls[largest] = ls[largest], ls[i]
        heapify(ls, n, largest)


def heapSort(ls):
    """Sort ls in place, ascending, using heap sort."""
    n = len(ls)
    # Build a max-heap. Indices >= n // 2 are leaves and already heaps, so
    # start from the last internal node; the original range(n, -1, -1) made
    # n // 2 + 2 redundant heapify calls with the same result.
    for i in range(n // 2 - 1, -1, -1):
        heapify(ls, n, i)
    # Repeatedly move the current max to the end, then re-heapify the prefix.
    for i in range(n - 1, 0, -1):
        ls[i], ls[0] = ls[0], ls[i]
        heapify(ls, i, 0)


ls = [2,5,3,8,6,5,4,7]
heapSort(ls)
print(ls)
4855514 | from __future__ import annotations
import typing as tp
import configparser
import os
from pathlib import Path
# Fallback log-store location under the current working directory.
# NOTE(review): Settings._get_default_dict builds this path itself instead
# of referencing this constant — confirm whether the constant is still used.
DEFAULT_LOGSTORE_DIR = f"{os.getcwd()}/.logexp"
class Settings:
    """Layered logexp configuration: built-in defaults overlaid by any
    ./logexp.ini and ~/.logexp.ini files."""

    @classmethod
    def _get_default_dict(cls) -> tp.Dict:
        """Build the built-in fallback configuration."""
        return {
            "logexp": {
                "module": "",
                "execution_path": os.getcwd(),
                "editor": "vi",
            },
            "logstore": {
                "store_dir": f"{os.getcwd()}/.logexp",
            },
        }

    def __init__(self) -> None:
        self._config = configparser.ConfigParser()
        self._config.read_dict(Settings._get_default_dict())
        # Missing files are silently skipped by ConfigParser.read().
        search_paths = [
            f"{os.getcwd()}/logexp.ini",
            os.path.expanduser("~/.logexp.ini"),
        ]
        self._config.read(search_paths)

    def load(self, filename: Path) -> None:
        """Overlay settings read from *filename*."""
        self._config.read(filename)

    @property
    def logexp_module(self) -> str:
        return self._config["logexp"]["module"]

    @property
    def logexp_execpath(self) -> Path:
        return Path(self._config["logexp"]["execution_path"])

    @property
    def logexp_editor(self) -> str:
        return self._config["logexp"]["editor"]

    @property
    def logstore_storepath(self) -> Path:
        return Path(self._config["logstore"]["store_dir"])
| StarcoderdataPython |
3443174 | from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
# Route table for the projects app.
urlpatterns=[
    path('',views.home,name='home'),
    path('project/review/<project_id>',views.project_review,name='project_review'),
    path('search/',views.search_project, name='search_results'),
    path('new/project',views.new_project,name='new-project'),
    path('profile/',views.profile,name='profile'),
    path('new_profile/',views.new_profile,name = 'new_profile'),
    path('edit/profile/',views.profile_edit,name = 'edit_profile'),
    # JSON API endpoints backed by class-based views.
    path('api/profiles/', views.ProfileList.as_view()),
    path('api/projects/', views.ProjectList.as_view()),
]

# Serve user-uploaded media through Django only during development.
if settings.DEBUG:
    urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
3446151 | from django.conf.urls import url
from django.contrib import admin
from gallery.views import PhotoListView
import gallery.views
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', PhotoListView.as_view(), name='homepage'),
    # AJAX like endpoint: photo id plus a signed vote value.
    url(r'^ajax/like/photo/(?P<pid>[-\d]+)/value/(?P<val>-?[-\d]+)/$',
        gallery.views.set_like, name='set_like'),
]
| StarcoderdataPython |
6665844 | <reponame>timgates42/dlint<filename>tests/test_bad_yaml_use.py
#!/usr/bin/env python
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import unittest
import dlint
class TestBadYAMLUse(dlint.test.base.BaseTest):
    """Tests that BadYAMLUseLinter flags unsafe yaml calls in a snippet."""

    def test_bad_yaml_usage(self):
        # NOTE(review): the snippet's blank lines were reconstructed so that
        # the linenos in `expected` (7, 8, 10, 11) line up with the four
        # yaml calls — confirm against the original test source.
        python_string = self.get_ast_node(
            """
            import yaml

            var1 = {'foo': 'bar'}
            var2 = 'test: !!python/object/apply:print ["HAI"]'

            yaml.dump(var1)
            yaml.dump_all([var1])

            yaml.load(var2)
            yaml.load_all(var2)
            """
        )

        linter = dlint.linters.BadYAMLUseLinter()
        linter.visit(python_string)

        result = linter.get_results()
        # One result per flagged yaml call, in source order.
        expected = [
            dlint.linters.base.Flake8Result(
                lineno=7,
                col_offset=0,
                message=dlint.linters.BadYAMLUseLinter._error_tmpl
            ),
            dlint.linters.base.Flake8Result(
                lineno=8,
                col_offset=0,
                message=dlint.linters.BadYAMLUseLinter._error_tmpl
            ),
            dlint.linters.base.Flake8Result(
                lineno=10,
                col_offset=0,
                message=dlint.linters.BadYAMLUseLinter._error_tmpl
            ),
            dlint.linters.base.Flake8Result(
                lineno=11,
                col_offset=0,
                message=dlint.linters.BadYAMLUseLinter._error_tmpl
            ),
        ]

        assert result == expected


if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
11355102 | <reponame>rjcc/bioinformatics_algorithms<filename>frequent_words_with_mismatches.py
from computing_frequencies_with_mismatches import computing_frequencies_with_mismatches
from number_to_pattern import number_to_pattern
def frequent_words_with_mismatches(text, k, d):
    """Return the most frequent k-mers of text, counting occurrences with
    up to d mismatches, as decoded pattern strings."""
    frequency_array = computing_frequencies_with_mismatches(text, k, d)
    max_count = max(frequency_array)
    frequent_patterns = []
    # Fix: iterate all 4**k pattern indices. The original range(0, 4**k - 1)
    # stopped one short and could never report the final k-mer ("TT...T").
    for i in range(4 ** k):
        if frequency_array[i] == max_count:
            pattern = number_to_pattern(i, k)
            frequent_patterns.append(pattern)
    return frequent_patterns
if __name__ == "__main__":
    # Python 2 script: raw_input and the print statement below.
    text = raw_input("Text: ")
    k, d = map(int, raw_input("K / D: ").split(" "))
    print " ".join(frequent_words_with_mismatches(text, k, d))
| StarcoderdataPython |
1934276 | <filename>workbench/random_ge_one.py
#!/usr/bin/env python3
"""random_ge_one.py
Add up uniformly distributed random numbers between zero and one, counting how
many you need to have your sum >= 1. An interesting thing happens if you do
this n times and take the average (for large values of n)...
See explanation here, for spoilers:
http://www.mostlymaths.net/2010/08/and-e-appears-from-nowhere.html
GE, 2/9/11
"""
# Used for reading command line arguments
import sys
# Gives (approximately) uniformly distributed random real numbers in [0, 1]
from random import random
# Adds random numbers until sum >= 1; returns the number of iterations needed
def single_run():
    """Count how many U(0,1) draws are needed for the running sum to reach 1."""
    total = iters = 0
    while total < 1:
        total += random()
        iters += 1
    return iters


# Takes the average of n "single runs"
def n_runs_average(n):
    """Average the draw counts of *n* independent single runs (tends to e).

    Fix: the original used xrange(), which does not exist on Python 3 even
    though the shebang and module docstring target python3; range() works.
    """
    total = 0.0
    for i in range(n):
        total += single_run()
    return total / n
# Main execution: average over argv[1] runs, defaulting to 1000 when the
# argument is missing or not an integer.
if __name__ == "__main__":
    try:
        print(n_runs_average(int(sys.argv[1])))
    # Fix: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; only a missing/invalid argument should fall back.
    except (IndexError, ValueError):
        print(n_runs_average(1000))
| StarcoderdataPython |
3365985 | '''
Tests for PDP Instance class.
'''
from models import PDPInstance, Point
# generate dummy points shared by both tests below
points = [
    Point(0, 1, 1),
    Point(1, 1, 2),
    Point(2, 1, 4)
]

# dummy instance built once at import time
instance = PDPInstance(1, points)
def test_matrix():
    '''
    Test distances matrix generation.
    '''
    # Expected pairwise distances between the three dummy points above.
    matrix = [
        [0, 1, 3],
        [1, 0, 2],
        [3, 2, 0]
    ]
    assert instance.distances == matrix
def test_farthest():
    '''
    Test getter of the two farthest points.
    '''
    p1, p2 = instance.get_farthest_points()
    # Points 0 and 2 are the farthest pair (distance 3); compared via str().
    assert str(p1) + ' ' + str(p2) == '0 1 1 2 1 4'
| StarcoderdataPython |
11266806 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGraph(RPackage):
    """A package to handle graph data structures.

    A package that implements some simple graph handling capabilities."""

    bioc = "graph"

    # NOTE(review): several commit values below are "<KEY>"/"<PASSWORD>"
    # placeholders (redaction artifacts), not real SHAs — restore the actual
    # commit hashes before using this package file.
    version('1.72.0', commit='<KEY>')
    version('1.68.0', commit='<PASSWORD>')
    version('1.62.0', commit='95223bd63ceb66cfe8d881f992a441de8b8c89a3')
    version('1.60.0', commit='e2aecb0a862f32091b16e0036f53367d3edf4c1d')
    version('1.58.2', commit='<KEY>')
    version('1.56.0', commit='c4abe227dac525757679743e6fb4f49baa34acad')
    version('1.54.0', commit='<KEY>')

    depends_on('r@2.10:', type=('build', 'run'))
    depends_on('r-biocgenerics@0.13.11:', type=('build', 'run'))
4859290 | # Repository: https://gitlab.com/quantify-os/quantify-scheduler
# Licensed according to the LICENCE file on the master branch
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
from __future__ import annotations
import inspect
import json
import tempfile
from pathlib import Path
import numpy as np
import pytest
from cluster import cluster
from pulsar_qcm import pulsar_qcm
from pulsar_qrm import pulsar_qrm
from quantify_core.data.handling import set_datadir # pylint: disable=no-name-in-module
import quantify_scheduler.schemas.examples as es
from quantify_scheduler.compilation import qcompile
from quantify_scheduler.instrument_coordinator.components import qblox
# Applied to every test in this module.
pytestmark = pytest.mark.usefixtures("close_all_instruments")

# Load the example device config and hardware mapping that ship next to
# quantify_scheduler.schemas.examples.
esp = inspect.getfile(es)

cfg_f = Path(esp).parent / "transmon_test_config.json"
with open(cfg_f, "r") as f:
    DEVICE_CFG = json.load(f)

map_f = Path(esp).parent / "qblox_test_mapping.json"
with open(map_f, "r") as f:
    HARDWARE_MAPPING = json.load(f)
@pytest.fixture(name="make_cluster")
def fixture_make_cluster(mocker):
    """Factory fixture: dummy Cluster component with two QCM modules."""
    def _make_cluster(name: str = "cluster0") -> qblox.ClusterComponent:
        cluster0 = cluster.cluster_dummy(name)
        component = qblox.ClusterComponent(cluster0)
        # Sequencer start/stop/arm are mocked; no hardware is touched.
        mocker.patch("pulsar_qcm.pulsar_qcm_ifc.pulsar_qcm_ifc.arm_sequencer")
        mocker.patch("pulsar_qcm.pulsar_qcm_ifc.pulsar_qcm_ifc.start_sequencer")
        mocker.patch("pulsar_qcm.pulsar_qcm_ifc.pulsar_qcm_ifc.stop_sequencer")
        qcm0 = cluster.cluster_qcm_dummy(f"{name}_qcm0")
        qcm1 = cluster.cluster_qcm_dummy(f"{name}_qcm1")
        component.add_modules(qcm0, qcm1)
        return component

    yield _make_cluster
@pytest.fixture(name="make_qcm")
def fixture_make_qcm(mocker):
    """Factory fixture: dummy baseband Pulsar QCM component (no LO)."""
    def _make_qcm(
        name: str = "qcm0", serial: str = "dummy"
    ) -> qblox.PulsarQCMComponent:
        # Baseband variant: report no LO hardware present.
        mocker.patch(
            "pulsar_qcm.pulsar_qcm_scpi_ifc.pulsar_qcm_scpi_ifc._get_lo_hw_present",
            return_value=False,
        )
        mocker.patch("pulsar_qcm.pulsar_qcm_ifc.pulsar_qcm_ifc.arm_sequencer")
        mocker.patch("pulsar_qcm.pulsar_qcm_ifc.pulsar_qcm_ifc.start_sequencer")
        mocker.patch("pulsar_qcm.pulsar_qcm_ifc.pulsar_qcm_ifc.stop_sequencer")
        qcm = pulsar_qcm.pulsar_qcm_dummy(name)
        qcm._serial = serial
        component = qblox.PulsarQCMComponent(qcm)
        mocker.patch.object(component.instrument_ref, "get_instr", return_value=qcm)
        # Sequencers always report as ARMED.
        mocker.patch.object(
            component.instrument,
            "get_sequencer_state",
            return_value={"status": "ARMED"},
        )
        return component

    yield _make_qcm
@pytest.fixture(name="make_qrm")
def fixture_make_qrm(mocker):
    """Factory fixture: dummy baseband Pulsar QRM with canned acquisitions."""
    def _make_qrm(
        name: str = "qrm0", serial: str = "dummy"
    ) -> qblox.PulsarQRMComponent:
        # Baseband variant: report no LO hardware present.
        mocker.patch(
            "pulsar_qrm.pulsar_qrm_scpi_ifc.pulsar_qrm_scpi_ifc._get_lo_hw_present",
            return_value=False,
        )
        mocker.patch("pulsar_qrm.pulsar_qrm_ifc.pulsar_qrm_ifc.arm_sequencer")
        mocker.patch("pulsar_qrm.pulsar_qrm_ifc.pulsar_qrm_ifc.start_sequencer")
        mocker.patch("pulsar_qrm.pulsar_qrm_ifc.pulsar_qrm_ifc.stop_sequencer")
        qrm = pulsar_qrm.pulsar_qrm_dummy(name)
        qrm._serial = serial
        component = qblox.PulsarQRMComponent(qrm)
        mocker.patch.object(component.instrument_ref, "get_instr", return_value=qrm)
        mocker.patch.object(
            component.instrument,
            "get_sequencer_state",
            return_value={"status": "ARMED"},
        )
        # Canned acquisition payload: one bin on channel "0".
        mocker.patch.object(
            component.instrument,
            "get_acquisitions",
            return_value={
                "0": {
                    "index": 0,
                    "acquisition": {
                        "bins": {
                            "integration": {"path0": [0], "path1": [0]},
                            "threshold": [0.12],
                            "avg_cnt": [1],
                        }
                    },
                }
            },
        )
        return component

    yield _make_qrm
@pytest.fixture(name="mock_acquisition_data")
def fixture_mock_acquisition_data():
    """Yield a dummy acquisition payload: one channel, ten bins, averaged."""
    acq_channel, acq_index_len = 0, 10  # mock 1 channel, N indices
    avg_count = 10

    def _scope_path():
        # One raw scope trace per path: 2**14 zero samples.
        return {
            "data": [0.0] * 2 ** 14,
            "out-of-range": False,
            "avg_count": avg_count,
        }

    bins = {
        "integration": {
            "path0": [0.0] * acq_index_len,
            "path1": [0.0] * acq_index_len,
        },
        "threshold": [0.12] * acq_index_len,
        "avg_cnt": [avg_count] * acq_index_len,
    }
    data = {
        str(acq_channel): {
            "index": acq_channel,
            "acquisition": {
                "scope": {
                    "path0": _scope_path(),
                    "path1": _scope_path(),
                },
                "bins": bins,
            },
        }
    }
    yield data
@pytest.fixture
def make_qcm_rf(mocker):
    """Factory fixture: dummy RF Pulsar QCM component (LO present)."""
    def _make_qcm_rf(
        name: str = "qcm_rf0", serial: str = "dummy"
    ) -> qblox.PulsarQCMRFComponent:
        # RF variant: report LO hardware present.
        mocker.patch(
            "pulsar_qcm.pulsar_qcm_scpi_ifc.pulsar_qcm_scpi_ifc._get_lo_hw_present",
            return_value=True,
        )
        mocker.patch("pulsar_qcm.pulsar_qcm_ifc.pulsar_qcm_ifc.arm_sequencer")
        mocker.patch("pulsar_qcm.pulsar_qcm_ifc.pulsar_qcm_ifc.start_sequencer")
        mocker.patch("pulsar_qcm.pulsar_qcm_ifc.pulsar_qcm_ifc.stop_sequencer")
        qcm_rf = pulsar_qcm.pulsar_qcm_dummy(name)
        qcm_rf._serial = serial
        component = qblox.PulsarQCMRFComponent(qcm_rf)
        mocker.patch.object(component.instrument_ref, "get_instr", return_value=qcm_rf)
        mocker.patch.object(
            component.instrument,
            "get_sequencer_state",
            return_value={"status": "ARMED"},
        )
        return component

    yield _make_qcm_rf
@pytest.fixture
def make_qrm_rf(mocker):
    """Factory fixture building a dummy RF Pulsar QRM component with mocked I/O."""

    def _make_qrm_rf(
        name: str = "qrm_rf0", serial: str = "dummy"
    ) -> qblox.PulsarQRMRFComponent:
        # RF variant: report that LO hardware *is* present.
        mocker.patch(
            "pulsar_qrm.pulsar_qrm_scpi_ifc.pulsar_qrm_scpi_ifc._get_lo_hw_present",
            return_value=True,
        )
        # Stub out sequencer control so no hardware is required.
        mocker.patch("pulsar_qrm.pulsar_qrm_ifc.pulsar_qrm_ifc.arm_sequencer")
        mocker.patch("pulsar_qrm.pulsar_qrm_ifc.pulsar_qrm_ifc.start_sequencer")
        mocker.patch("pulsar_qrm.pulsar_qrm_ifc.pulsar_qrm_ifc.stop_sequencer")
        qrm_rf = pulsar_qrm.pulsar_qrm_dummy(name)
        qrm_rf._serial = serial
        component = qblox.PulsarQRMRFComponent(qrm_rf)
        mocker.patch.object(component.instrument_ref, "get_instr", return_value=qrm_rf)
        # Always report the sequencer as ARMED.
        mocker.patch.object(
            component.instrument,
            "get_sequencer_state",
            return_value={"status": "ARMED"},
        )
        # Canned acquisition result: a single bin on channel "0".
        mocker.patch.object(
            component.instrument,
            "get_acquisitions",
            return_value={
                "0": {
                    "index": 0,
                    "acquisition": {
                        "bins": {
                            "integration": {"path0": [0], "path1": [0]},
                            "threshold": [0.12],
                            "avg_cnt": [1],
                        }
                    },
                }
            },
        )
        return component

    yield _make_qrm_rf
def test_initialize_pulsar_qcm_component(make_qcm):
    # Smoke test: constructing the component must not raise.
    make_qcm("qblox_qcm0", "1234")


def test_initialize_pulsar_qrm_component(make_qrm):
    make_qrm("qblox_qrm0", "1234")


def test_initialize_pulsar_qcm_rf_component(make_qcm_rf):
    make_qcm_rf("qblox_qcm_rf0", "1234")


def test_initialize_pulsar_qrm_rf_component(make_qrm_rf):
    make_qrm_rf("qblox_qrm_rf0", "1234")


def test_initialize_cluster_component(make_cluster):
    # make_cluster fixture is defined earlier in this module.
    make_cluster("cluster0")
def test_prepare(close_all_instruments, schedule_with_measurement, make_qcm, make_qrm):
    """prepare() should upload the compiled program and arm sequencer 0."""
    # Arrange
    qcm: qblox.PulsarQCMComponent = make_qcm("qcm0", "1234")
    qrm: qblox.PulsarQRMComponent = make_qrm("qrm0", "1234")

    # Act
    with tempfile.TemporaryDirectory() as tmp_dir:
        set_datadir(tmp_dir)
        compiled_schedule = qcompile(
            schedule_with_measurement, DEVICE_CFG, HARDWARE_MAPPING
        )
        prog = compiled_schedule["compiled_instructions"]
        qcm.prepare(prog["qcm0"])
        qrm.prepare(prog["qrm0"])

    # Assert
    qcm.instrument.arm_sequencer.assert_called_with(sequencer=0)
    qrm.instrument.arm_sequencer.assert_called_with(sequencer=0)
def test_prepare_rf(
    close_all_instruments, schedule_with_measurement_q2, make_qcm_rf, make_qrm_rf
):
    """prepare() should arm sequencer 0 on both RF pulsars."""
    # Arrange
    qcm: qblox.PulsarQCMRFComponent = make_qcm_rf("qcm_rf0", "1234")
    qrm: qblox.PulsarQRMRFComponent = make_qrm_rf("qrm_rf0", "1234")

    # Act
    with tempfile.TemporaryDirectory() as tmp_dir:
        set_datadir(tmp_dir)
        compiled_schedule = qcompile(
            schedule_with_measurement_q2, DEVICE_CFG, HARDWARE_MAPPING
        )
        prog = compiled_schedule["compiled_instructions"]
        qcm.prepare(prog["qcm_rf0"])
        qrm.prepare(prog["qrm_rf0"])

    # Assert
    qcm.instrument.arm_sequencer.assert_called_with(sequencer=0)
    qrm.instrument.arm_sequencer.assert_called_with(sequencer=0)
def test_prepare_exception_qcm(close_all_instruments, make_qcm):
    """prepare() must raise KeyError for a program naming an unknown sequencer."""
    # Arrange
    qcm: qblox.PulsarQCMComponent = make_qcm("qcm0", "1234")
    invalid_config = {"idontexist": "this is not used"}

    # Act
    with pytest.raises(KeyError) as execinfo:
        qcm.prepare(invalid_config)

    # Assert
    assert execinfo.value.args[0] == (
        "Invalid program. Attempting to access non-existing sequencer with"
        ' name "idontexist".'
    )
def test_prepare_exception_qrm(close_all_instruments, make_qrm):
    """prepare() must raise KeyError for a program naming an unknown sequencer.

    Fix: the instrument was previously misnamed "qcm0"; use "qrm0" for a QRM,
    consistent with the other QRM tests in this module.
    """
    # Arrange
    qrm: qblox.PulsarQRMComponent = make_qrm("qrm0", "1234")
    invalid_config = {"idontexist": "this is not used"}

    # Act
    with pytest.raises(KeyError) as execinfo:
        qrm.prepare(invalid_config)

    # Assert
    assert execinfo.value.args[0] == (
        "Invalid program. Attempting to access non-existing sequencer with"
        ' name "idontexist".'
    )
def test_prepare_exception_qcm_rf(close_all_instruments, make_qcm_rf):
    """prepare() must raise KeyError for a program naming an unknown sequencer."""
    # Arrange
    qcm: qblox.PulsarQCMRFComponent = make_qcm_rf("qcm_rf0", "1234")
    invalid_config = {"idontexist": "this is not used"}

    # Act
    with pytest.raises(KeyError) as execinfo:
        qcm.prepare(invalid_config)

    # Assert
    assert execinfo.value.args[0] == (
        "Invalid program. Attempting to access non-existing sequencer with"
        ' name "idontexist".'
    )
def test_prepare_exception_qrm_rf(close_all_instruments, make_qrm_rf):
    """prepare() must raise KeyError for a program naming an unknown sequencer.

    Fix: the instrument was previously misnamed "qcm_rf0" and annotated with
    the baseband component type; use "qrm_rf0" and the RF component type.
    """
    # Arrange
    qrm: qblox.PulsarQRMRFComponent = make_qrm_rf("qrm_rf0", "1234")
    invalid_config = {"idontexist": "this is not used"}

    # Act
    with pytest.raises(KeyError) as execinfo:
        qrm.prepare(invalid_config)

    # Assert
    assert execinfo.value.args[0] == (
        "Invalid program. Attempting to access non-existing sequencer with"
        ' name "idontexist".'
    )
def test_is_running(make_qrm):
    # A freshly created component reports not running.
    qrm: qblox.PulsarQRMComponent = make_qrm("qrm0", "1234")
    assert not qrm.is_running


def test_wait_done(make_qrm):
    # Smoke test: wait_done() must return without raising.
    qrm: qblox.PulsarQRMComponent = make_qrm("qrm0", "1234")
    qrm.wait_done()
def test_retrieve_acquisition_qcm(close_all_instruments, make_qcm):
    """A QCM has no acquisition path, so retrieve_acquisition() yields None."""
    # Arrange
    qcm: qblox.PulsarQCMComponent = make_qcm("qcm0", "1234")

    # Act
    acq = qcm.retrieve_acquisition()

    # Assert
    assert acq is None


def test_retrieve_acquisition_qrm(
    close_all_instruments, schedule_with_measurement, make_qrm
):
    """After running a schedule, the QRM returns data keyed by (channel, index)."""
    # Arrange
    qrm: qblox.PulsarQRMComponent = make_qrm("qrm0", "1234")

    # Act
    with tempfile.TemporaryDirectory() as tmp_dir:
        set_datadir(tmp_dir)
        compiled_schedule = qcompile(
            schedule_with_measurement, DEVICE_CFG, HARDWARE_MAPPING
        )
        prog = compiled_schedule["compiled_instructions"]
        prog = dict(prog)
        qrm.prepare(prog[qrm.instrument.name])
        qrm.start()
        acq = qrm.retrieve_acquisition()

    # Assert
    # Two entries (the two paths of the mocked bin) for channel 0, index 0.
    assert len(acq[(0, 0)]) == 2


def test_retrieve_acquisition_qcm_rf(close_all_instruments, make_qcm_rf):
    """An RF QCM also has no acquisition path."""
    # Arrange
    qcm_rf: qblox.PulsarQCMRFComponent = make_qcm_rf("qcm_rf0", "1234")

    # Act
    acq = qcm_rf.retrieve_acquisition()

    # Assert
    assert acq is None
def test_retrieve_acquisition_qrm_rf(
    close_all_instruments, schedule_with_measurement_q2, make_qrm_rf
):
    """After running a schedule, the RF QRM returns data keyed by (channel, index)."""
    # Arrange
    qrm_rf: qblox.PulsarQRMRFComponent = make_qrm_rf("qrm_rf0", "1234")

    # Act
    with tempfile.TemporaryDirectory() as tmp_dir:
        set_datadir(tmp_dir)
        compiled_schedule = qcompile(
            schedule_with_measurement_q2, DEVICE_CFG, HARDWARE_MAPPING
        )
        prog = compiled_schedule["compiled_instructions"]
        prog = dict(prog)
        qrm_rf.prepare(prog[qrm_rf.instrument.name])
        qrm_rf.start()
        acq = qrm_rf.retrieve_acquisition()

    # Assert
    assert len(acq[(0, 0)]) == 2
def test_start_qcm_qrm(
    close_all_instruments, schedule_with_measurement, make_qcm, make_qrm
):
    """start() must forward to start_sequencer on both pulsars."""
    # Arrange
    qcm: qblox.PulsarQCMComponent = make_qcm("qcm0", "1234")
    qrm: qblox.PulsarQRMComponent = make_qrm("qrm0", "1234")

    # Act
    with tempfile.TemporaryDirectory() as tmp_dir:
        set_datadir(tmp_dir)
        compiled_schedule = qcompile(
            schedule_with_measurement, DEVICE_CFG, HARDWARE_MAPPING
        )
        prog = compiled_schedule["compiled_instructions"]
        qcm.prepare(prog["qcm0"])
        qrm.prepare(prog["qrm0"])
        qcm.start()
        qrm.start()

    # Assert
    qcm.instrument.start_sequencer.assert_called()
    qrm.instrument.start_sequencer.assert_called()


def test_start_qcm_qrm_rf(
    close_all_instruments, schedule_with_measurement_q2, make_qcm_rf, make_qrm_rf
):
    """start() must forward to start_sequencer on both RF pulsars."""
    # Arrange
    qcm_rf: qblox.PulsarQCMRFComponent = make_qcm_rf("qcm_rf0", "1234")
    qrm_rf: qblox.PulsarQRMRFComponent = make_qrm_rf("qrm_rf0", "1234")

    # Act
    with tempfile.TemporaryDirectory() as tmp_dir:
        set_datadir(tmp_dir)
        compiled_schedule = qcompile(
            schedule_with_measurement_q2, DEVICE_CFG, HARDWARE_MAPPING
        )
        prog = compiled_schedule["compiled_instructions"]
        qcm_rf.prepare(prog["qcm_rf0"])
        qrm_rf.prepare(prog["qrm_rf0"])
        qcm_rf.start()
        qrm_rf.start()

    # Assert
    qcm_rf.instrument.start_sequencer.assert_called()
    qrm_rf.instrument.start_sequencer.assert_called()
def test_stop_qcm_qrm(close_all_instruments, make_qcm, make_qrm):
    """stop() must forward to stop_sequencer on both pulsars."""
    # Arrange
    qcm: qblox.PulsarQCMComponent = make_qcm("qcm0", "1234")
    qrm: qblox.PulsarQRMComponent = make_qrm("qrm0", "1234")

    # Act
    qcm.stop()
    qrm.stop()

    # Assert
    qcm.instrument.stop_sequencer.assert_called()
    qrm.instrument.stop_sequencer.assert_called()
def test_stop_qcm_qrm_rf(close_all_instruments, make_qcm_rf, make_qrm_rf):
    """stop() must forward to stop_sequencer on both RF pulsars.

    Fix: this test previously requested the baseband ``make_qcm``/``make_qrm``
    fixtures (and only annotated the variables as RF types), so the RF
    components were never actually exercised. Request the RF factories.
    """
    # Arrange
    qcm_rf: qblox.PulsarQCMRFComponent = make_qcm_rf("qcm_rf0", "1234")
    qrm_rf: qblox.PulsarQRMRFComponent = make_qrm_rf("qrm_rf0", "1234")

    # Act
    qcm_rf.stop()
    qrm_rf.stop()

    # Assert
    qcm_rf.instrument.stop_sequencer.assert_called()
    qrm_rf.instrument.stop_sequencer.assert_called()
# ------------------- _QRMAcquisitionManager -------------------


def test_qrm_acquisition_manager__init__(make_qrm):
    # Smoke test: constructing the acquisition manager must not raise.
    qrm: qblox.PulsarQRMComponent = make_qrm("qrm0", "1234")
    qblox._QRMAcquisitionManager(
        qrm, qrm._hardware_properties.number_of_sequencers, dict(), None
    )


def test_get_threshold_data(make_qrm, mock_acquisition_data):
    qrm: qblox.PulsarQRMComponent = make_qrm("qrm0", "1234")
    acq_manager = qblox._QRMAcquisitionManager(
        qrm, qrm._hardware_properties.number_of_sequencers, dict(), None
    )
    # Channel 0 / index 0 of the mocked payload carries threshold 0.12.
    data = acq_manager._get_threshold_data(mock_acquisition_data, 0, 0)
    assert data == 0.12


def test_get_integration_data(make_qrm, mock_acquisition_data):
    qrm: qblox.PulsarQRMComponent = make_qrm("qrm0", "1234")
    acq_manager = qblox._QRMAcquisitionManager(
        qrm, qrm._hardware_properties.number_of_sequencers, dict(), None
    )
    data = acq_manager._get_integration_data(mock_acquisition_data, acq_channel=0)
    # Both integration paths of the mocked payload are ten zeros.
    np.testing.assert_array_equal(data[0], np.array([0.0] * 10))
    np.testing.assert_array_equal(data[1], np.array([0.0] * 10))
def test_get_scope_channel_and_index(make_qrm):
    """A single trace mapping resolves to its (channel, index) pair."""
    acq_mapping = {
        qblox.AcquisitionIndexing(acq_index=0, acq_channel=0): ("seq0", "trace"),
    }
    qrm: qblox.PulsarQRMComponent = make_qrm("qrm0", "1234")
    acq_manager = qblox._QRMAcquisitionManager(
        qrm, qrm._hardware_properties.number_of_sequencers, acq_mapping, None
    )
    result = acq_manager._get_scope_channel_and_index()
    assert result == (0, 0)


def test_get_scope_channel_and_index_exception(make_qrm):
    """Two trace mappings on one QRM must raise: only one trace is allowed."""
    acq_mapping = {
        qblox.AcquisitionIndexing(acq_index=0, acq_channel=0): ("seq0", "trace"),
        qblox.AcquisitionIndexing(acq_index=1, acq_channel=0): ("seq0", "trace"),
    }
    qrm: qblox.PulsarQRMComponent = make_qrm("qrm0", "1234")
    acq_manager = qblox._QRMAcquisitionManager(
        qrm, qrm._hardware_properties.number_of_sequencers, acq_mapping, None
    )
    with pytest.raises(RuntimeError) as execinfo:
        acq_manager._get_scope_channel_and_index()
    assert (
        execinfo.value.args[0]
        == "A scope mode acquisition is defined for both acq_channel 0 with "
        "acq_index 0 as well as acq_channel 0 with acq_index 1. Only a single "
        "trace acquisition is allowed per QRM."
    )
def test_get_protocol(make_qrm):
    """_get_protocol returns the protocol string stored for (index, channel)."""
    answer = "trace"
    acq_mapping = {
        qblox.AcquisitionIndexing(acq_index=0, acq_channel=0): ("seq0", answer),
    }
    qrm: qblox.PulsarQRMComponent = make_qrm("qrm0", "1234")
    acq_manager = qblox._QRMAcquisitionManager(
        qrm, qrm._hardware_properties.number_of_sequencers, acq_mapping, None
    )
    assert acq_manager._get_protocol(0, 0) == answer


def test_get_sequencer_index(make_qrm):
    """_get_sequencer_index parses the sequencer number out of "seq<N>"."""
    answer = 0
    acq_mapping = {
        qblox.AcquisitionIndexing(acq_index=0, acq_channel=0): (
            f"seq{answer}",
            "trace",
        ),
    }
    qrm: qblox.PulsarQRMComponent = make_qrm("qrm0", "1234")
    acq_manager = qblox._QRMAcquisitionManager(
        qrm, qrm._hardware_properties.number_of_sequencers, acq_mapping, None
    )
    assert acq_manager._get_sequencer_index(0, 0) == answer
| StarcoderdataPython |
3562977 | # Quantopian, Inc. licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
def get_bool(prompt, default=None):
    """Ask a yes/no question until an interpretable answer is given.

    *default* may be a bool; it is shown in brackets and used when the user
    just presses Enter. Answers starting with y/t/1 mean True, n/f/0 False.
    """
    if default is not None:
        default = 'y' if default else 'n'
    while True:
        full_prompt = prompt
        if default:
            full_prompt += ' [{}]'.format(default)
        full_prompt += ' '
        answer = input(full_prompt) or default
        if not answer:
            continue  # nothing usable entered; ask again
        first = answer.lower()[0]
        if first in ('y', 't', '1'):
            return True
        if first in ('n', 'f', '0'):
            return False
def get_int(prompt, default=None, minimum=None):
    """Ask for an integer until a valid one (>= *minimum*, if given) is entered."""
    while True:
        full_prompt = prompt
        if default is not None:
            full_prompt += ' [{}]'.format(default)
        full_prompt += ' '
        answer = input(full_prompt)
        if answer == '':
            answer = default
        if answer is None:
            continue  # no answer and no default; ask again
        try:
            value = int(answer)
        except Exception:
            continue  # not an integer; ask again
        if minimum is not None and value < minimum:
            print('The minimum is {}'.format(minimum))
            continue
        return value
def get_string(prompt, default=None, none_ok=False):
    """Ask for a string; Enter accepts *default*.

    When *none_ok* is true, the literal answer "none" clears the value to ''.

    Fixes two bugs: previously "none" cleared the value even when *none_ok*
    was False, and *none_ok* was dropped when re-prompting recursively.
    """
    full_prompt = prompt
    if default:
        full_prompt += ' [{}]'.format(default)
    if none_ok:
        full_prompt += ' [enter "none" to clear]'
    full_prompt += ' '
    answer = input(full_prompt)
    if answer == '':
        answer = default
    # Only honour the "none" escape when the caller allows clearing.
    if none_ok and answer == 'none':
        answer = ''
    if answer is None:
        # Re-prompt, preserving none_ok (previously lost here).
        return get_string(prompt, default, none_ok)
    return answer
def get_string_or_list(prompt, default=None):
    """Collect one or more strings interactively; returns a sorted list.

    *default* (a string or a list) is offered only before anything has been
    entered. Once at least one answer is collected, pressing Enter finishes.
    """
    so_far = []
    while True:
        full_prompt = prompt
        if default and not so_far:
            if isinstance(default, str):
                full_prompt += ' [{}]'.format(default)
            else:
                full_prompt += ' [{}]'.format(', '.join(default))
        if so_far:
            full_prompt += ' [hit Enter when finished]'
        full_prompt += ' '
        answer = input(full_prompt)
        if not answer:
            if so_far:
                # Canonicalize for comparison purposes.
                so_far.sort()
                return so_far
            # Empty input with nothing collected yet: fall back to default.
            answer = default
        if answer is None:
            # No input and no default: start the dialogue over.
            return get_string_or_list(prompt, default)
        if isinstance(answer, str):
            so_far.append(answer)
        else:
            # Default was a list: take all of its elements at once.
            so_far.extend(answer)
| StarcoderdataPython |
3463920 | # https://leetcode.com/problems/remove-duplicate-letters/
class Solution:
    def removeDuplicateLetters(self, s: str) -> str:
        """Return the lexicographically smallest subsequence of *s* containing
        every distinct letter exactly once (monotonic-stack algorithm)."""
        last_seen = {ch: i for i, ch in enumerate(s)}
        stack = []
        in_stack = set()
        for i, ch in enumerate(s):
            if ch in in_stack:
                continue
            # Pop larger letters that are guaranteed to appear again later.
            while stack and stack[-1] > ch and last_seen[stack[-1]] > i:
                in_stack.discard(stack.pop())
            stack.append(ch)
            in_stack.add(ch)
        return ''.join(stack)
| StarcoderdataPython |
296493 | #!/usr/bin/env python
""" run as `python -O level_10.py` to disable debug garbage """
from itertools import cycle, islice
input = [106, 16, 254, 226, 55, 2, 1, 166, 177, 247, 93, 0, 255, 228, 60, 36]
indata = list(range(0, 256))
# Test data
# input = [1, 2, 3]
# indata = list(range(0, 256))
skip = 0
def rotlist(items, by):
    """Return a copy of *items* rotated left by *by* positions.

    Fixes: the length was previously taken from the global ``indata`` instead
    of the argument itself (they only happened to match in this script), and
    the parameter shadowed the builtin ``list``.
    """
    pool = cycle(items)
    skipped_pos = islice(pool, by, None)
    next_state = [next(skipped_pos) for _ in range(len(items))]
    if __debug__: print("Skipped {}: {}".format(by, next_state))
    return next_state
# Build the real length sequence: ASCII codes of the comma-separated input
# numbers, plus the standard Knot Hash suffix lengths.
input2 = []
if len(input) > 0:
    for i in input[0:-1]:
        input2 += list(map(ord, str(i)))
        input2.append(44)  # ord(',') between numbers
    input2 += list(map(ord, str(input[-1])))
input2 += (17, 31, 73, 47, 23)  # standard suffix
print(input2)

totalForward = 0
# 64 rounds of the knot-tying step; position and skip size persist between
# rounds. The list is rotated so the "current position" is always index 0.
for r in range(0, 64):
    for i in input2:
        if __debug__: print("Original: {}".format(indata))
        indata[0:i] = indata[0:i][::-1]
        if __debug__: print("Reversed {} elements: {}".format(i, indata))
        indata = rotlist(indata, i + skip)
        totalForward += i + skip
        skip += 1
        if __debug__: print("Skip now is: {}".format(skip))

# Undo the accumulated rotation to recover the original orientation.
if totalForward > len(indata):
    totalForward = totalForward % len(indata)
if __debug__: print("Total forward from original: {}".format(totalForward))
origProjection = list(indata)
origProjection = rotlist(origProjection, len(indata) - totalForward)
if __debug__: print("Result ({}): {}".format(len(origProjection), origProjection))

# Dense hash: XOR each block of 16 numbers of the sparse hash together.
dense = []
for x in range(0, 16):
    bte = origProjection[x * 16]
    for y in range(1, 16):
        bte ^= origProjection[x * 16 + y]
    dense.append(bte)
print(dense)

# Hex representation: two lowercase hex digits per dense-hash byte.
result2 = ""
for i in dense:
    result2 += format(i, '02x')
print("Result2: {}".format(result2))
u"""
--- Part Two ---
The logic you've constructed forms a single round of the Knot Hash algorithm; running the full thing requires many of these rounds. Some input and output processing is also required.
First, from now on, your input should be taken not as a list of numbers, but as a string of bytes instead. Unless otherwise specified, convert characters to bytes using their ASCII codes. This will allow you to handle arbitrary ASCII strings, and it also ensures that your input lengths are never larger than 255. For example, if you are given 1,2,3, you should convert it to the ASCII codes for each character: 49,44,50,44,51.
Once you have determined the sequence of lengths to use, add the following lengths to the end of the sequence: 17, 31, 73, 47, 23. For example, if you are given 1,2,3, your final sequence of lengths should be 49,44,50,44,51,17,31,73,47,23 (the ASCII codes from the input string combined with the standard length suffix values).
Second, instead of merely running one round like you did above, run a total of 64 rounds, using the same length sequence in each round. The current position and skip size should be preserved between rounds. For example, if the previous example was your first round, you would start your second round with the same length sequence (3, 4, 1, 5, 17, 31, 73, 47, 23, now assuming they came from ASCII codes and include the suffix), but start with the previous round's current position (4) and skip size (4).
Once the rounds are complete, you will be left with the numbers from 0 to 255 in some order, called the sparse hash. Your next task is to reduce these to a list of only 16 numbers called the dense hash. To do this, use numeric bitwise XOR to combine each consecutive block of 16 numbers in the sparse hash (there are 16 such blocks in a list of 256 numbers). So, the first element in the dense hash is the first sixteen elements of the sparse hash XOR'd together, the second element in the dense hash is the second sixteen elements of the sparse hash XOR'd together, etc.
For example, if the first sixteen elements of your sparse hash are as shown below, and the XOR operator is ^, you would calculate the first output number like this:
65 ^ 27 ^ 9 ^ 1 ^ 4 ^ 3 ^ 40 ^ 50 ^ 91 ^ 7 ^ 6 ^ 0 ^ 2 ^ 5 ^ 68 ^ 22 = 64
Perform this operation on each of the sixteen blocks of sixteen numbers in your sparse hash to determine the sixteen numbers in your dense hash.
Finally, the standard way to represent a Knot Hash is as a single hexadecimal string; the final output is the dense hash in hexadecimal notation. Because each number in your dense hash will be between 0 and 255 (inclusive), always represent each number as two hexadecimal digits (including a leading zero as necessary). So, if your first three numbers are 64, 7, 255, they correspond to the hexadecimal numbers 40, 07, ff, and so the first six characters of the hash would be 4007ff. Because every Knot Hash is sixteen such numbers, the hexadecimal representation is always 32 hexadecimal digits (0-f) long.
Here are some example hashes:
The empty string becomes a2582a3a0e66e6e86e3812dcb672a272.
AoC 2017 becomes 33efeb34ea91902bb2f59c9920caa6cd.
1,2,3 becomes 3efbe78a8d82f29979031a4aa0b16a9d.
1,2,4 becomes 63960835bcdc130f0b66d7ff4f6a5a8e.
Treating your puzzle input as a string of ASCII characters, what is the Knot Hash of your puzzle input? Ignore any leading or trailing whitespace you might encounter.
"""
| StarcoderdataPython |
11258699 | # apis_v1/documentation_source/organization_search_doc.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
def organization_search_doc_template_values(url_root):
    """
    Show documentation about organizationSearch

    Builds the context dict used to render the API documentation page for
    the organizationSearch endpoint (parameters, status codes, sample
    response and "try now" link).
    """
    required_query_parameter_list = [
        {
            'name': 'voter_device_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'An 88 character unique identifier linked to a voter record on the server',
        },
        {
            'name': 'api_key',
            'value': 'string (from post, cookie, or get (in that order))',  # boolean, integer, long, string
            'description': 'The unique key provided to any organization using the WeVoteServer APIs',
        },
    ]
    optional_query_parameter_list = [
        {
            'name': 'organization_name',
            'value': 'string',  # boolean, integer, long, string
            'description': 'Name of the organization that is displayed.',
        },
        {
            'name': 'organization_email',
            'value': 'string',  # boolean, integer, long, string
            'description': 'Contact email of the organization.',
        },
        {
            'name': 'organization_website',
            'value': 'string',  # boolean, integer, long, string
            'description': 'Website of the organization.',
        },
        {
            'name': 'organization_twitter_handle',
            'value': 'string',  # boolean, integer, long, string
            'description': 'Twitter handle of the organization.',
        },
    ]
    potential_status_codes_list = [
        {
            'code': 'VALID_VOTER_DEVICE_ID_MISSING',
            'description': 'Cannot proceed. A valid voter_device_id parameter was not included.',
        },
        {
            'code': 'VALID_VOTER_ID_MISSING',
            'description': 'Cannot proceed. A valid voter_id was not found.',
        },
        {
            'code': 'ORGANIZATION_SEARCH_ALL_TERMS_MISSING',
            'description': 'Cannot proceed. No search terms were provided.',
        },
        {
            'code': 'ORGANIZATIONS_RETRIEVED',
            'description': 'Successfully returned a list of organizations that match search query.',
        },
        {
            'code': 'NO_ORGANIZATIONS_RETRIEVED',
            'description': 'Successfully searched, but no organizations found that match search query.',
        },
    ]
    try_now_link_variables_dict = {
        # 'organization_we_vote_id': 'wv85org1',
    }
    # Literal JSON-like sketch of the API response, rendered on the docs page.
    api_response = '{\n' \
                   ' "status": string,\n' \
                   ' "success": boolean,\n' \
                   ' "voter_device_id": string (88 characters long),\n' \
                   ' "organization_email": string (the original search term passed in),\n' \
                   ' "organization_name": string (the original search term passed in),\n' \
                   ' "organization_twitter_handle": string (the original search term passed in),\n' \
                   ' "organization_website": string (the original search term passed in),\n' \
                   ' "organizations_list": list\n' \
                   ' [\n' \
                   ' "organization_id": integer,\n' \
                   ' "organization_we_vote_id": string,\n' \
                   ' "organization_name": string,\n' \
                   ' "organization_twitter_handle": string,\n' \
                   ' "organization_facebook": string,\n' \
                   ' "organization_email": string,\n' \
                   ' "organization_website": string,\n' \
                   ' ],\n' \
                   '}'
    template_values = {
        'api_name': 'organizationSearch',
        'api_slug': 'organizationSearch',
        'api_introduction':
            "Find a list of all organizations that match any of the search terms.",
        'try_now_link': 'apis_v1:organizationSearchView',
        'try_now_link_variables_dict': try_now_link_variables_dict,
        'url_root': url_root,
        'get_or_post': 'GET',
        'required_query_parameter_list': required_query_parameter_list,
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
        'api_response_notes':
            "",
        'potential_status_codes_list': potential_status_codes_list,
    }
    return template_values
| StarcoderdataPython |
12860487 | <gh_stars>10-100
import unittest
from mock import patch
from starter.starter_AdminEmail import starter_AdminEmail
from tests.activity.classes_mock import FakeLogger
from tests.classes_mock import FakeLayer1
import tests.settings_mock as settings_mock
class TestStarterAdminEmail(unittest.TestCase):
    """Unit tests for the AdminEmail workflow starter."""

    def setUp(self):
        # Starter wired to test settings plus a logger that records calls.
        self.fake_logger = FakeLogger()
        self.starter = starter_AdminEmail(settings_mock, logger=self.fake_logger)

    @patch("boto.swf.layer1.Layer1")
    def test_start(self, mock_layer1):
        mock_layer1.return_value = FakeLayer1()
        # start() returns None on success.
        self.assertIsNone(self.starter.start(settings_mock))

    @patch("boto.swf.layer1.Layer1")
    def test_start_workflow(self, mock_layer1):
        mock_layer1.return_value = FakeLayer1()
        self.assertIsNone(self.starter.start_workflow())
| StarcoderdataPython |
3346441 | from Lixadeira import Lixadeira
from src.Furadeira import Furadeira
estoque = []
def mostramenu():
    """Display the main menu and return the chosen option as an int.

    Re-prompts (recursively) until the input parses as an integer.
    """
    print('''
________________________________
| LOJA DE FERRAMENTAS |
| Menu |
| 0 - Sair |
| 1 - Cadastrar |
--------------------------------''')
    try:
        opcao = int(input("Escolha: "))
        return opcao
    except ValueError:
        print("Somente dígitos!")
        return mostramenu()
def cadastrar():
    """Dispatch registration to the handler for the selected tool type."""
    escolha = tipoFerramenta()
    if escolha == 1:
        cadastrarLixadeira()
    elif escolha == 2:
        cadastrarFuradeira()
    else:
        print("Opcao invalida")
        cadastrar()
def tipoFerramenta():
    """Ask which tool type to register; retry until digits are entered."""
    try:
        return int(input("Que tipo de ferramenta deseja cadastrar? [1 Lixadeira/ 2 Furadeira]"))
    except ValueError:
        print("Somente dígitos!")
        return tipoFerramenta()
def cadastrarLixadeira():
    """Collect data for a sander (lixadeira), store it in estoque and show it.

    Fixes: this function previously instantiated ``Furadeira`` (drill) instead
    of ``Lixadeira``, and prompted "Rotacoes" for the value stored as potencia.
    """
    nome = input("Nome: ")
    tensao = input("Tensao:")
    preco = input("Preço:")
    potencia = input("Potencia: ")
    # NOTE(review): Lixadeira's 4th constructor argument is assumed to take
    # this value, mirroring the 4-argument call made elsewhere — confirm.
    lixadeira = Lixadeira(nome, tensao, preco, potencia)
    estoque.append(lixadeira)
    lixadeira.getInformacoes()
def cadastrarFuradeira():
    """Collect data for a drill (furadeira), store it in estoque and show it.

    Fix: this function previously instantiated ``Lixadeira`` (sander) instead
    of ``Furadeira``.
    """
    nome = input("Nome: ")
    tensao = input("Tensao:")
    preco = input("Preço:")
    rotacoes = input("Rotacoes")
    furadeira = Furadeira(nome, tensao, preco, rotacoes)
    estoque.append(furadeira)
    furadeira.getInformacoes()
if __name__ == '__main__':
    # Main loop: show the menu until the user chooses 0 (exit).
    while True:
        escolha = mostramenu()
        if escolha == 0:
            print("Obrigado! Volte Sempre!")
            break
        elif escolha == 1:
            cadastrar()
        else:
            print("Escolha uma opção válida do Menu.")
| StarcoderdataPython |
1868858 | <gh_stars>0
import os
from commons.config.settings import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# django_extensions is a dev-only helper; remove it from this configuration.
INSTALLED_APPS.remove('django_extensions')

# Application definition
INSTALLED_APPS += []

ROOT_URLCONF = 'commons.config.urls'

REST_FRAMEWORK = {
    'DEFAULT_FILTER_BACKENDS': (
        'django_filters.rest_framework.DjangoFilterBackend',
    ),
}

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'postgres',
        'USER': 'app',
        'PASSWORD': 'app',
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}

# NOTE(review): this CI branch assigns a DATABASES dict identical to the one
# above, so it is currently redundant — confirm whether GitHub Actions was
# meant to use different credentials/host before simplifying.
if os.environ.get('GITHUB_WORKFLOW'):
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql',
            'NAME': 'postgres',
            'USER': 'app',
            'PASSWORD': 'app',
            'HOST': '127.0.0.1',
            'PORT': '5432',
        }
    }
#!/usr/bin/env python
# simple Python test program (printmess.py)
from __future__ import print_function
# Emit a fixed message; used to verify that Python execution works.
print('test message')
| StarcoderdataPython |
1838537 | import json
import copy
import pyblish.api
from openpype.lib import get_subset_name_with_asset_doc
from openpype.pipeline import legacy_io
class CollectRenderScene(pyblish.api.ContextPlugin):
    """Collect instance which renders whole scene in PNG.

    Creates instance with family 'renderScene' which will have all layers
    to render which will be composite into one result. The instance is not
    collected from scene.

    Scene will be rendered with all visible layers similar way like review is.

    Instance is disabled if there are any created instances of 'renderLayer'
    or 'renderPass'. That is because it is expected that this instance is
    used as lazy publish of TVPaint file.

    Subset name is created similar way like 'renderLayer' family. It can use
    `renderPass` and `renderLayer` keys which can be set using settings and
    `variant` is filled using `renderPass` value.
    """
    label = "Collect Render Scene"
    order = pyblish.api.CollectorOrder - 0.39
    hosts = ["tvpaint"]

    # Value of 'render_pass' in subset name template
    render_pass = "beauty"

    # Settings attributes
    enabled = False
    # Value of 'render_layer' and 'variant' in subset name template
    render_layer = "Main"

    def process(self, context):
        """Create the 'renderScene' instance on the context.

        The instance starts disabled when any renderPass/renderLayer
        instance already exists (lazy-publish behaviour).
        """
        # Check if there are created instances of renderPass and renderLayer
        # - that will define if renderScene instance is enabled after
        #   collection
        any_created_instance = False
        for instance in context:
            family = instance.data["family"]
            if family in ("renderPass", "renderLayer"):
                any_created_instance = True
                break

        # Global instance data modifications
        # Fill families
        family = "renderScene"
        # Add `review` family for thumbnail integration
        families = [family, "review"]

        # Collect asset doc to get asset id
        # - not sure if it's good idea to require asset id in
        #   get_subset_name?
        workfile_context = context.data["workfile_context"]
        asset_name = workfile_context["asset"]
        asset_doc = legacy_io.find_one({
            "type": "asset",
            "name": asset_name
        })

        # Project name from workfile context
        project_name = context.data["workfile_context"]["project"]
        # Host name from environment variable
        host_name = context.data["hostName"]
        # Variant is using render pass name
        variant = self.render_layer
        dynamic_data = {
            "renderlayer": self.render_layer,
            "renderpass": self.render_pass,
        }
        # TODO remove - Backwards compatibility for old subset name templates
        # - added 2022/04/28
        dynamic_data["render_layer"] = dynamic_data["renderlayer"]
        dynamic_data["render_pass"] = dynamic_data["renderpass"]

        task_name = workfile_context["task"]
        subset_name = get_subset_name_with_asset_doc(
            "render",
            variant,
            task_name,
            asset_doc,
            project_name,
            host_name,
            dynamic_data=dynamic_data
        )

        instance_data = {
            "family": family,
            "families": families,
            "fps": context.data["sceneFps"],
            "subset": subset_name,
            "name": subset_name,
            # Label shows the 1-based mark-in/mark-out frame range.
            "label": "{} [{}-{}]".format(
                subset_name,
                context.data["sceneMarkIn"] + 1,
                context.data["sceneMarkOut"] + 1
            ),
            # Disabled when explicit render instances already exist.
            "active": not any_created_instance,
            "publish": not any_created_instance,
            "representations": [],
            "layers": copy.deepcopy(context.data["layersData"]),
            "asset": asset_name,
            "task": task_name,
            # Add render layer to instance data
            "renderlayer": self.render_layer
        }
        instance = context.create_instance(**instance_data)
        self.log.debug("Created instance: {}\n{}".format(
            instance, json.dumps(instance.data, indent=4)
        ))
| StarcoderdataPython |
4945485 | #! /usr/bin/env python3
import argparse, build_utils, common, os, sys, shutil, package
def main() -> int:
jars = package.main()
os.chdir(common.basedir)
for jar in jars:
build_utils.deploy(jar, tempdir = "target/deploy")
return build_utils.release()
if __name__ == "__main__":
sys.exit(main())
| StarcoderdataPython |
6588140 | <filename>project/forms.py
from django import forms
from .models import Profile, Post, Rating
class ProfileForm(forms.ModelForm):
    """ModelForm for Profile; the owning user is set in the view, not the form."""

    class Meta:
        model = Profile
        exclude = ["user"]
class PostForm(forms.ModelForm):
    """ModelForm exposing every field of Post (empty exclude list)."""

    class Meta:
        model = Post
        exclude = []
class RatingsForm(forms.ModelForm):
    """Form for submitting design/usability/content/creativity scores."""

    class Meta:
        model = Rating
        fields = ['design', 'usability', 'content', 'creativity']
        # NOTE(review): 'fields' already whitelists the editable fields, so
        # this 'exclude' looks redundant — confirm and consider dropping it.
        exclude = ['overall_score', 'profile', 'post']
6445900 | <gh_stars>1-10
"""
Adapted from https://github.com/kuangliu/pytorch-fpn/blob/master/fpn.py
"""
from torch import nn
from models import register_model
from models.backbone.resnet import GroupnormBackbone, BottleneckGroup
import torch.nn.functional as F
@register_model("fpn_pareto")
class FpnMultiHead(GroupnormBackbone):
"""
Multiple layer4 outputs, one feature pyramid output
"""
    def __init__(self, block, layers, ncpg, stride, num_heads, output_dim=256):
        """Build the backbone up to layer3, then one layer4 "head" per task,
        plus the FPN top/lateral/smooth convolutions (output_dim channels)."""
        super().__init__(block, layers, ncpg, stride)
        self.output_dim = output_dim
        self.num_stages = 4  # pyramid stages
        self.layer4 = nn.ModuleList()
        self.num_heads = num_heads
        for _ in range(num_heads):
            # Reset inplanes before building each head so every head consumes
            # the same layer3 output width (256 * block.expansion channels).
            self.inplanes = 256 * block.expansion
            layer4 = self._make_layer(block, 512, layers[3], stride=stride, group_norm=ncpg)
            self.layer4.append(layer4)

        # Top layer
        self.toplayer = nn.Conv2d(2048, output_dim, kernel_size=1, stride=1, padding=0)  # Reduce channels

        # Smooth layers
        self.smooth1 = nn.Conv2d(output_dim, output_dim, kernel_size=3, stride=1, padding=1)
        self.smooth2 = nn.Conv2d(output_dim, output_dim, kernel_size=3, stride=1, padding=1)
        self.smooth3 = nn.Conv2d(output_dim, output_dim, kernel_size=3, stride=1, padding=1)

        # Lateral layers (1x1 convs projecting c4/c3/c2 to output_dim)
        self.latlayer1 = nn.Conv2d(1024, output_dim, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2d( 512, output_dim, kernel_size=1, stride=1, padding=0)
        self.latlayer3 = nn.Conv2d( 256, output_dim, kernel_size=1, stride=1, padding=0)
def _upsample_add(self, x, y):
'''
Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note in PyTorch, when input size is odd, the upsampled feature map
with `F.upsample(..., scale_factor=2, mode='nearest')`
maybe not equal to the lateral feature map size.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsample which supports arbitrary output sizes.
'''
_, _, H, W = y.size()
return F.upsample(x, size=(H,W), mode='bilinear') + y
def forward(self, data, endpoints, detached=True):
device = next(self.parameters()).device
x = data['img'].to(device, non_blocking=True)
c1 = self.conv1(x)
c1 = self.bn1(c1)
c1 = self.relu(c1)
c1 = self.maxpool(c1)
c2 = self.layer1(c1)
c3 = self.layer2(c2)
c4 = self.layer3(c3)
rep = c4
if detached:
# version two of pareto loss needs this
# forwards always with default detached = True
endpoints['grad_rep'] = rep
rep = rep.clone().detach().requires_grad_(True)
endpoints['detached_rep'] = rep
else:
# clean up
endpoints['grad_rep'] = None
endpoints['detached_rep'] = None
head_x = []
for layer4 in self.layer4:
head_x.append(layer4(rep))
# PN does not have one common
# representatino
# Only using c4 for now
# Top-down
p5 = self.toplayer(head_x[0])
p4 = self._upsample_add(p5, self.latlayer1(rep))
p3 = self._upsample_add(p4, self.latlayer2(c3))
p2 = self._upsample_add(p3, self.latlayer3(c2))
# Smooth
p4 = self.smooth1(p4)
p3 = self.smooth2(p3)
p2 = self.smooth3(p2)
if self.num_heads == 1:
return (p2, p3, p4, p5), head_x[0]
else:
return (p2, p3, p4, p5), head_x[1:]
@staticmethod
def build(cfg):
stride = cfg['stride']
# backwards compability
if 'ncpg' in cfg:
ncpg = cfg['ncpg']
else:
ncpg = cfg['num_groups']
num_heads = cfg['num_heads']
output_dim = cfg['output_dim']
model = FpnMultiHead(BottleneckGroup, [3, 4, 6, 3], ncpg, stride, num_heads, output_dim)
duplicate = []
layer4_duplicates = []
for idx in range(num_heads):
layer4_duplicates.append("layer4.{}".format(idx))
duplicate.append(("layer4", layer4_duplicates))
skips = ["fc", "layer4"]
return model, skips, duplicate
| StarcoderdataPython |
382173 | """
This module contains unit tests, for the most important functions of
ruspy.estimation.estimation_cost_parameters. The values to compare the results with
are saved in resources/estimation_test. The setting of the test is documented in the
inputs section in test module.
"""
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from ruspy.config import TEST_RESOURCES_DIR
from ruspy.estimation.estimation_transitions import create_transition_matrix
from ruspy.model_code.choice_probabilities import choice_prob_gumbel
from ruspy.model_code.cost_functions import calc_obs_costs
from ruspy.model_code.cost_functions import lin_cost
from ruspy.model_code.fix_point_alg import calc_fixp
from ruspy.test.ranodm_init import random_init
@pytest.fixture
def inputs():
    """Model primitives shared by the tests below."""
    return {
        "nstates": 90,
        "cost_fct": lin_cost,
        "params": np.array([10, 2]),
        "trans_prob": np.array([0.2, 0.3, 0.15, 0.35]),
        "disc_fac": 0.9999,
    }
@pytest.fixture
def outputs():
    """Pre-computed reference arrays stored under resources/estimation_test."""
    resource_dir = TEST_RESOURCES_DIR + "estimation_test/"
    return {
        "costs": np.loadtxt(resource_dir + "myop_cost.txt"),
        "trans_mat": np.loadtxt(resource_dir + "trans_mat.txt"),
        "fixp": np.loadtxt(resource_dir + "fixp.txt"),
        "choice_probs": np.loadtxt(resource_dir + "choice_prob.txt"),
    }
def test_cost_func(inputs, outputs):
    """Observation costs must match the stored reference matrix."""
    computed = calc_obs_costs(
        inputs["nstates"], inputs["cost_fct"], inputs["params"], 0.001
    )
    assert_array_almost_equal(computed, outputs["costs"])
def test_create_trans_mat(inputs, outputs):
    """The generated transition matrix must match the stored reference."""
    trans_mat = create_transition_matrix(inputs["nstates"], inputs["trans_prob"])
    assert_array_almost_equal(trans_mat, outputs["trans_mat"])
def test_fixp(inputs, outputs):
    """The computed fixed point must match the stored reference."""
    fixp = calc_fixp(outputs["trans_mat"], outputs["costs"], inputs["disc_fac"])[0]
    assert_array_almost_equal(fixp, outputs["fixp"])
def test_choice_probs(inputs, outputs):
    """Conditional choice probabilities must match the stored reference."""
    probs = choice_prob_gumbel(outputs["fixp"], outputs["costs"], inputs["disc_fac"])
    assert_array_almost_equal(probs, outputs["choice_probs"])
def test_trans_mat_rows_one():
    """Each row of a randomly generated transition matrix must sum to one."""
    rand_dict = random_init()
    n_states = rand_dict["estimation"]["states"]
    trans_mat = create_transition_matrix(
        n_states, np.array(rand_dict["simulation"]["known_trans"])
    )
    assert_array_almost_equal(trans_mat.sum(axis=1), np.ones(n_states))
| StarcoderdataPython |
1731074 | <reponame>discardthree/PyQPECgen<filename>qpecgen/box.py
# pylint: disable=unused-variable
#
# qpecgen/box.py
#
# Copyright (c) 2016 <NAME>
#
# This software is released under the MIT License.
#
# http://opensource.org/licenses/mit-license.php
#
from __future__ import absolute_import
from cvxopt import matrix
from qpecgen.base import QpecgenProblem
from qpecgen.helpers import choose_num, rand, conmat, randint, npvec, zeros
from qpecgen.helpers import randcst, create_name, eye
class Qpecgen200(QpecgenProblem):
    '''
    Qpecgen200 generates and manages the data for a qpecgen problem of type 200
    (BOX-QPEC). In addition to the methods used in problem construction, the
    make_QPCCProblem() method can be used to export the problem as a QpecgenQPCC,
    which is a BasicQPCC object in which some qpecgen specific data is also
    preserved.
    '''
    def __init__(self, pname, param, qpec_type=200):
        """Build all random problem data in dependency order: constraint
        statuses (info), generator point (xgen, ygen), first level
        multipliers, lower level data (q, F, pi, sigma), then the linear
        objective coefficients c and d."""
        super(Qpecgen200, self).__init__(pname, param, qpec_type=qpec_type)
        # Generate xgen
        self.info = {}
        self._make_info()
        self._make_xgen()
        self.u = None # just initializing
        self._make_ygen()
        self._make_a_ulambda()
        self._make_F_pi_sigma_index()
        self._make_c_d()
    def _make_info(self):
        '''
        Randomly allots statuses to constraints (active, nonactive, degenerate).
        The 'l' series gives the allotment for upper level constraints Ax <= 0.
        The 'ms' series determines what happens with the single-bounded lower
        level variables. The 'md' series determines what happens with the
        double-bounded lower level variables.
        Since the ordering of variables and constraints within these groups is
        not significant, the generator just determines how many will have a
        given status and then the first l_deg upper level constraints will be
        degenerate, the next l_non will be nonactive, etc.
        '''
        # Decide how many of the non-degenerate first level ctrs should be
        # nonactive
        l = self.param['l']
        l_deg = self.param['first_deg']
        l_nonactive = choose_num(l - l_deg)
        l_active = l - l_deg - l_nonactive
        self.info.update({
            'l': l,
            'l_deg': l_deg,
            'l_nonactive': l_nonactive,
            'l_active': l_active})
        m = self.param['m']
        second_deg = self.param['second_deg']
        # md double-bounded y vars (at least 1), the rest single-bounded.
        md = 1 + choose_num(m - 2)
        ms = m - md
        # single bounded y variables (only bounded below):
        ms_deg = max(choose_num(min(ms, second_deg)),
                     second_deg - m + ms)
        ms_nonactive = choose_num(ms - ms_deg)
        ms_active = ms - ms_deg - ms_nonactive
        self.info.update({
            'm': m,
            'ms': ms,
            'ms_deg': ms_deg, # F=0, y=0
            'ms_nonactive': ms_nonactive, # F>0, y=0
            'ms_active': ms_active}) # F=0, y>0
        # divvy up degeneracy numbers so there are second_deg degenerate y vars
        remaining_degen = second_deg - self.info['ms_deg']
        md_upp_deg = choose_num(remaining_degen) # F=0, y=u
        md_low_deg = remaining_degen - md_upp_deg # F=0, y=0
        self.info.update({
            'md': md,
            'md_upp_deg': md_upp_deg,
            'md_low_deg': md_low_deg})
        # double bounded y variables (bounded below and above):
        md_nondegen = md - self.info['md_upp_deg'] - self.info['md_low_deg']
        md_upp_non = choose_num(md_nondegen) # F<0, y=u
        md_low_non = choose_num(md_nondegen - md_upp_non) # F>0, y=0
        md_float = md_nondegen - md_upp_non - md_low_non # F=0, 0<y<u
        self.info.update({
            'md_upp_nonactive': md_upp_non,
            'md_low_nonactive': md_low_non,
            'md_float': md_float})
        info = self.info
        param = self.param
        # Sanity checks: the allotted counts must tile m exactly and be
        # nonnegative.
        assert self.m == info['ms'] + info['md']
        assert info['ms'] == info['ms_deg'] + \
            info['ms_active'] + info['ms_nonactive']
        assert info['md'] == info['md_upp_deg'] + info['md_upp_nonactive'] + \
            info['md_low_deg'] + info['md_low_nonactive'] + \
            info['md_float']
        assert param['second_deg'] == info['ms_deg'] + info['md_upp_deg'] + \
            info['md_low_deg']
        assert info['ms_deg'] >= 0
        assert info['ms_active'] >= 0
        assert info['ms_nonactive'] >= 0
        assert info['md_upp_deg'] >= 0
        assert info['md_upp_nonactive'] >= 0
        assert info['md_low_deg'] >= 0
        assert info['md_low_nonactive'] >= 0
        assert info['md_float'] >= 0
    def _make_xgen(self):
        # Generator value for x: componentwise difference of uniforms,
        # scaled to (-10, 10).
        self.info['xgen'] = 10 * (rand(self.n) - rand(self.n))
    def _make_u(self):
        # Upper bounds for the double-bounded y variables, uniform in (0, 10).
        self.u = 10. * rand(self.info['md'])
    def _make_ygen(self):
        """Generate ygen consistent with the statuses chosen in _make_info:
        double-bounded vars at the upper bound, at zero, or strictly inside
        (0, u); single-bounded vars at zero or strictly positive."""
        self._make_u()
        num_double_at_upper = self.info[
            'md_upp_deg'] + self.info['md_upp_nonactive']
        num_not_floating = self.info['md'] - self.info['md_float']
        double_at_upper = npvec(self.u[:num_double_at_upper])
        double_at_lower = zeros(
            self.info['md_low_deg'] + self.info['md_low_nonactive'])
        double_floating = npvec(
            [randcst() * x for x in self.u[num_not_floating:]])
        single_at_lower = zeros(
            self.info['ms_deg'] + self.info['ms_nonactive'])
        single_floating = rand(self.info['ms_active'])
        # ygen = conmat([npvec(u[:m_upp_deg+upp_nonactive]), # double_at_upper
        #                zeros(m_low_deg+low_nonactive), # double_at_lower
        #                v2, # double_floating
        #                zeros(m_inf_deg+inf_nonactive), # single_at_lower
        #                rand(m_inf-m_inf_deg-inf_nonactive)]) # single_floating
        self.info['ygen'] = conmat([
            double_at_upper, # y=u cases
            double_at_lower, # y=0 cases
            double_floating, # 0<y<u cases
            single_at_lower, # y=0 cases
            single_floating]) # y>0 cases
        # ygen must lie in the box [0, u] (double) / [0, inf) (single).
        for yi in self.info['ygen']:
            assert yi >= 0
        for i in range(self.info['md']):
            assert self.info['ygen'][i] <= self.u[i]
    def _make_a_ulambda(self):
        """Choose a and the first level multipliers ulambda so that (xgen,
        ygen) satisfies A*[x;y] + a <= 0 with the statuses from _make_info."""
        xgen = self.info['xgen']
        ygen = self.info['ygen']
        # FIRST LEVEL CTRS A[x;y] + a <= 0
        # Generate the first level multipliers ulambda associated with A*[x;y]+a<=0.
        # Generate a so the constraints Ax+a <= 0 are loose or tight where
        # appropriate.
        Axy = self.A * conmat([xgen, ygen])
        self.a = -Axy - conmat([
            zeros(self.info['l_deg']), # degenerate: A + a = 0, multiplier 0
            rand(self.info['l_nonactive']), # nonactive: A + a < 0
            zeros(self.info['l_active'])]) # active: A + a = 0
        self.info['ulambda'] = conmat([
            zeros(self.info['l_deg']),
            zeros(self.info['l_nonactive']),
            rand(self.info['l_active'])])
    def _make_F_pi_sigma_index(self):
        """Choose q so F(xgen, ygen) has the planned sign pattern, classify
        every complementarity index into alpha/beta/gamma sets, and generate
        multipliers pi (for F) and sigma (for y) with mix_deg mixed
        degeneracy."""
        N = self.N
        M = self.M
        u = self.u
        xgen = self.info['xgen']
        ygen = self.info['ygen']
        m = self.param['m']
        # Design q so that Nx + My + E^Tlambda + q = 0 at the solution (xgen,
        # ygen)
        q = -N * xgen - M * ygen
        q += conmat([
            # double bounded, degenerate at upper
            zeros(self.info['md_upp_deg']),
            # double bounded, nonactive at upper
            -rand(self.info['md_upp_nonactive']),
            # double bounded, degenerate at lower
            zeros(self.info['md_low_deg']),
            # double bounded, nonactive at lower
            rand(self.info['md_low_nonactive']),
            zeros(self.info['md_float']), # double bounded, floating
            # single bounded, degenerate at lower
            zeros(self.info['ms_deg']),
            # single bounded, nonactive at lower
            rand(self.info['ms_nonactive']),
            zeros(self.info['ms_active'])]) # single bounded, floating
        #########################################
        ##            For later convenience    ##
        #########################################
        F = N * xgen + M * ygen + q
        mix_deg = self.param['mix_deg']
        tol_deg = self.param['tol_deg']
        # Calculate three index sets alpha, beta and gamma at (xgen, ygen).
        # alpha denotes the index set of i at which F(i) is active, but y(i) not.
        # beta_upp and beta_low denote the index sets of i at which F(i) is
        # active, and y(i) is active at the upper and the lower end point of
        # the finite interval [0, u] respectively.
        # beta_inf denotes the index set of i at which both F(i) and y(i) are
        # active for the infinite interval [0, inf).
        # gamma_upp and gamma_low denote the index sets of i at which F(i) is
        # not active, but y(i) is active at the upper and the lower point of
        # the finite interval [0, u] respectively.
        # gamma_inf denotes the index set of i at which F(i) is not active, but y(i)
        # is active for the infinite interval [0, inf).
        index = []
        md = self.info['md']
        for i in range(md):
            assert ygen[i] >= -tol_deg and ygen[i] < u[i] + tol_deg, \
                "{0} not in [0, {1}]".format(ygen[i], u[i])
            if abs(F[i]) <= tol_deg and ygen[i] > tol_deg and ygen[i] + tol_deg < u[i]:
                index.append(1) # For the index set alpha.
            elif abs(F[i]) <= tol_deg and abs(ygen[i] - u[i]) <= tol_deg:
                index.append(2) # For the index set beta_upp.
            elif abs(F[i]) <= tol_deg and abs(ygen[i]) <= tol_deg:
                index.append(3) # For the index set beta_low.
            elif F[i] < -tol_deg and abs(ygen[i] - u[i]) <= tol_deg:
                index.append(-1) # For the index set gamma_upp.
            elif F[i] > tol_deg and abs(ygen[i]) <= tol_deg:
                index.append(-1) # For the index set gamma_low.
            else:
                raise Exception(("didn't know what to do with this case: "
                                 "ygen={0}, u[i] = {1}, F[i]={2}").format(
                                     ygen[i], u[i], F[i]))
        for i in range(md, m):
            if ygen[i] > F[i] + tol_deg:
                index.append(1) # For the index set alpha.
            elif abs(ygen[i] - F[i]) <= tol_deg:
                index.append(4) # For the index set beta_inf.
            else:
                index.append(-1) # For the index set gamma_inf.
        # Generate the first level multipliers  pi      sigma
        # associated with other constraints other than the first level constraints
        # A*[x;y]+a<=0 in the relaxed nonlinear program. In particular,
        # pi            is associated with  F(x, y)=N*x+M*y+q, and
        # sigma                        with y.
        mix_upp_deg = max(
            mix_deg - self.info['md_low_deg'] - self.info['ms_deg'],
            choose_num(self.info['md_upp_deg']))
        mix_upp_deg = min(mix_upp_deg, mix_deg)
        mix_low_deg = max(
            mix_deg - mix_upp_deg - self.info['ms_deg'],
            choose_num(self.info['md_low_deg']))
        mix_low_deg = min(mix_low_deg, mix_deg - mix_upp_deg)
        mix_inf_deg = mix_deg - mix_upp_deg - mix_low_deg
        mix_inf_deg = min(mix_inf_deg, mix_deg - mix_upp_deg - mix_inf_deg)
        assert mix_deg >= 0
        assert mix_upp_deg >= 0
        assert mix_low_deg >= 0
        assert mix_inf_deg >= 0
        # assert self.param['second_deg'] == self.info['ms_deg'] +
        # self.info['md_low_deg'] + self.info['md_upp_deg'] + mix_deg
        k_mix_inf = 0
        k_mix_upp = 0
        k_mix_low = 0
        pi = zeros(m, 1)
        sigma = zeros(m, 1)
        for i in range(m):
            if index[i] == 1:
                pi[i] = randcst() - randcst()
                sigma[i] = 0
            elif index[i] == 2:
                if k_mix_upp < mix_upp_deg:
                    pi[i] = 0
                    # The first mix_upp_deg constraints associated with F(i)<=0
                    # in the set beta_upp are degenerate.
                    sigma[i] = 0
                    # The first mix_upp_deg constraints associated with
                    # y(i)<=u(i) in the set beta_upp are degenerate.
                    k_mix_upp = k_mix_upp + 1
                else:
                    pi[i] = randcst()
                    sigma[i] = randcst()
            elif index[i] == 3:
                if k_mix_low < mix_low_deg:
                    pi[i] = 0
                    # The first mix_low_deg constraints associated with F(i)>=0
                    # in the set beta_low are degenerate.
                    sigma[i] = 0
                    # The first mix_low_deg constraints associated with
                    # y(i)>=0 in the set beta_low are degenerate.
                    k_mix_low = k_mix_low + 1
                else:
                    pi[i] = -randcst()
                    sigma[i] = -randcst()
            elif index[i] == 4:
                if k_mix_inf < mix_inf_deg:
                    pi[i] = 0
                    # The first mix_inf_deg constraints associated with F(i)>=0
                    # in the set beta_inf are degenerate.
                    sigma[i] = 0
                    # The first mix_inf_deg constraints associated with
                    # y(i)>=0 in the set beta_inf are degenerate.
                    k_mix_inf = k_mix_inf + 1
                else:
                    pi[i] = -randcst()
                    sigma[i] = -randcst()
            else:
                pi[i] = 0
                sigma[i] = randcst() - randcst()
        self.q = q
        self.info.update({
            'F': F,
            'mix_upp_deg': mix_upp_deg,
            'mix_low_deg': mix_low_deg,
            'mix_inf_deg': mix_inf_deg,
            'pi': pi,
            'sigma': sigma})
    def _make_c_d(self):
        """Choose the linear objective coefficients c and d so (xgen, ygen)
        satisfies the KKT conditions with the generated multipliers."""
        n = self.param['n']
        m = self.param['m']
        # Generate coefficients of the linear part of the objective
        # Generate c and d such that (xgen, ygen) satisfies KKT conditions
        # of AVI-MPEC as well as the first level degeneracy.
        Px = self.get_Px()
        Pxy = self.get_Pxy()
        Py = self.get_Py()
        xgen = self.info['xgen']
        ygen = self.info['ygen']
        pi = self.info['pi']
        sigma = self.info['sigma']
        self.c = -(Px * xgen + Pxy * ygen + self.N.T * pi)
        self.d = -(Pxy.T * xgen + Py * ygen + self.M.T * pi + sigma)
        if self.param['l'] > 0:
            self.c += -(self.A[:, :n].T * self.info['ulambda'])
            self.d += -(self.A[:, n:m + n].T * self.info['ulambda'])
        # ELEPHANT reinstate later
        # self.info['optval'] = (0.5*(self.info['optsol'].T)*self.P*self.info['optsol']
        #                       +conmat([self.c, self.d]).T*self.info['optsol'])[0,0]
    def make_QPCC_sol(self):
        """Assemble the generator solution [x; y; lamDL; lamS; lamDU] and
        record the dual values in self.info."""
        lamDL, lamS, lamDU = self.get_dual_vals(
            self.info['xgen'], self.info['ygen'])
        self.info.update({
            'lamDL': lamDL,
            'lamS': lamS,
            'lamDU': lamDU})
        gensol = conmat([
            self.info['xgen'],
            self.info['ygen'],
            lamDL,
            lamS,
            lamDU])
        return gensol
    def get_dual_vals(self, x, y):
        """
        Computes the values of the lower level problem's dual variables
        vectors at the given solution (x, y).
        Args:
            x, y: an optimal solution to the QPEC.
        Returns:
            :math:`\lambda_D^L`: vector of dual variable values for the
                constraints :math:`y_D \geq 0`
            :math:`\lambda_S`: vector of dual variable values for the
                constraints :math:`y_S \geq 0`
            :math:`\lambda_D^U`: vector of dual variable values for the
                constraints :math:`y_D \leq y_u`
        """
        # computing the full sol at xgen, ygen
        md = self.info['md']
        ms = self.info['ms']
        lamDL = zeros(md)
        lamDU = zeros(md)
        lamS = zeros(ms)
        # Residual of the stationarity condition; split its sign into the
        # lower/upper bound multipliers.
        ETlambda = -self.N * x - self.M * y - self.q
        assert ms + md == len(ETlambda)
        for i in range(md):
            if ETlambda[i] >= 0:
                lamDL[i] = 0.
                lamDU[i] = ETlambda[i]
            else:
                lamDL[i] = -ETlambda[i]
                lamDU[i] = 0.
            # assert lamDL[i] - lamDU[i] == ETlambda[i]
        # print lamS
        for i in range(ms):
            lamS[i] = -ETlambda[md + i]
            # assert lamS[i] == ETlambda[md+i]
            # assert lamS[i] >= 0, "if this goes wrong it's because this part of
            # q wasn't generated right!"
        # E1 = conmat([eye(md), zeros(md, ms), -eye(md)], option='h')
        # E2 = conmat([zeros(ms, md), eye(ms), zeros(ms, md)], option='h')
        # lam = conmat([lamDL, lamS, lamDU])
        # ETlambda1 = conmat([E1, E2])*lam
        # assert np.allclose(ETlambda1, ETlambda), "{0} {1}".format(ETlambda1, ETlambda)
        return lamDL, lamS, lamDU
    def return_problem(self):
        """
        Args:
            (None)
        Returns:
            problem: a dictionary with keys ``P``, ``c``, ``d``, ``A``, ``a``,
                ``u``, ``N``, ``M``, ``q`` defining the problem data, followed
                by the ``info`` and ``param`` dictionaries.
        """
        problem = {
            'P': self.P,
            'c': self.c,
            'd': self.d,
            'A': self.A,
            'a': self.a,
            'u': self.u,
            'N': self.N,
            'M': self.M,
            'q': self.q}
        return problem, self.info, self.param
    def export_QPCC_data(self):
        """Flatten the QPEC into QPCC matrices over the stacked variable
        [x; y; lamL; lamU]. Note the method returns ``locals()``, so callers
        pick out the pieces (objQ, G1..G8, A1, A2, details, ...) by name."""
        P, info, param = self.return_problem()
        n = param['n']
        m = param['m']
        md = info['md']
        ms = info['ms']
        l = param['l']
        varsused = [1] * (n + m) + [0] * (m + md)
        names = create_name("x", n) + create_name("y", m) + \
            create_name("lamL", md) + create_name("lamL", ms, start=md) + \
            create_name("lamU", md)
        objQ = matrix([
            [matrix(0.5 * P['P']), matrix(zeros(m + md, m + n))],
            [matrix(zeros(m + n, m + md)), matrix(zeros(m + md, m + md))]])
        objp = conmat([P['c'], P['d'], zeros(m + md)])
        objr = 0
        G1 = conmat([P['A'], zeros(l, m + md)], option='h')
        h1 = -P['a']
        G2 = conmat([zeros(md, n), -eye(md), zeros(md, 2 * m)], option='h')
        h2 = zeros(md)
        G3 = conmat([zeros(md, n), eye(md), zeros(md, 2 * m)], option='h')
        h3 = P['u']
        G4 = conmat([zeros(ms, n + md), -eye(ms),
                     zeros(ms, m + md)], option='h')
        h4 = zeros(ms)
        G5 = conmat([zeros(m, n + m), -eye(m), zeros(m, md)], option='h')
        h5 = zeros(m)
        G6 = conmat([zeros(md, n + 2 * m), -eye(md)], option='h')
        h6 = zeros(md)
        # Type 201 additionally box-constrains x between xl and xu.
        if isinstance(self, Qpecgen201):
            G7 = conmat([-eye(n), zeros(n, 2 * m + md)], option='h')
            h7 = -self.info['xl']
            G8 = conmat([eye(n), zeros(n, 2 * m + md)], option='h')
            h8 = self.info['xu']
        A1 = conmat(
            [P['N'][:md], P['M'][:md], -eye(md), zeros(md, ms), eye(md)], option='h')
        b1 = -P['q'][:md]
        A2 = conmat(
            [P['N'][md:], P['M'][md:], zeros(ms, md), -eye(ms), zeros(ms, md)],
            option='h')
        b2 = -P['q'][md:]
        details = {
            'varsused': varsused,
            'geninfo': info,
            'genparam': param,
            'gensol': self.make_QPCC_sol()}
        return locals()
class Qpecgen201(Qpecgen200):
    """
    This subclass of ``qpecgen.box.Qpecgen200`` generates a more specific type
    of BOX-QPEC problem known as the FULL-BOX-QPEC. Type 201 is a more
    specific case of type 200 where x variables are constrained
    :math:`x_l \leq x \leq x_u` and y variables are constrained
    :math:`0 \leq y \leq y_u \leq 10` for integers :math:`x_l \in [-10, 0]`,
    :math:`x_u \in [1, 10]`, :math:`y_u \in [1, 10]`.
    Some class methods (not shown here due to private status) are overridden
    for this class so that problems are generated with the full box structure.
    """
    def __init__(self, pname, param):
        # Same construction pipeline as Qpecgen200, tagged as type 201.
        super(Qpecgen201, self).__init__(pname, param, qpec_type=201)
    def _make_info(self):
        """Override of Qpecgen200._make_info: every y variable is double
        bounded (ms = 0), so the second level degeneracy is split only
        between the upper and lower bounds."""
        md = self.param['m']
        l = self.param['l']
        second_deg = self.param['second_deg']
        # Note that we only consider the following two cases of box constraints:
        # y(i) in [0, +inf) or [0, u] where u is a nonnegative scalar.
        # Clearly, any other interval can be obtained by using the mapping
        # y <--- c1+c2*y.
        # It is assumed that the last m_inf constraints are of the form [0, inf)
        # The remaining m_inf - m_inf_deg - inf_nonactive constraints are where
        # F=0, y>0
        # y variables which are bounded below and above
        # There will be m - m_inf variables with double sided bounds. each upper
        # bound is chosen uniform in [0,10]
        md_upp_deg = choose_num(second_deg)
        # degenerate with y at upper bound: F=0, y=u
        md_low_deg = second_deg - md_upp_deg
        # degenerate with y at lower bound: F=0, y=0
        md_upp_nonactive = choose_num(md - md_upp_deg - md_low_deg)
        # F not active, y at upper bound: F<0, y=u
        md_low_nonactive = choose_num(
            md - md_upp_deg - md_low_deg - md_upp_nonactive)
        # F not active, y at lower bound: F>0, y=0
        l_deg = self.param['first_deg']
        l_nonactive = choose_num(l - l_deg)
        self.info.update({
            'ms': 0,
            'ms_deg': 0,
            'ms_nonactive': 0,
            'ms_active': 0,
            'md': md,
            'md_upp_deg': md_upp_deg,
            'md_low_deg': md_low_deg,
            'md_upp_nonactive': md_upp_nonactive,
            'md_low_nonactive': md_low_nonactive,
            'md_float': (
                md - md_upp_deg - md_low_deg - md_upp_nonactive - md_low_nonactive),
            # Randomly decide how many of the non-degenerate first level ctrs
            # should be nonactive
            'l': l,
            'l_deg': l_deg,
            'l_nonactive': l_nonactive,
            'l_active': l - l_deg - l_nonactive})
    def _make_xgen(self):
        # Integer box bounds xl in [-10, 0] and xu in [1, 10]; xgen is drawn
        # uniformly inside the box.
        xl = randint(-10, 0, self.param['n'])
        xu = randint(1, 10, self.param['n'])
        self.info['xl'] = npvec(xl)
        self.info['xu'] = npvec(xu)
        self.info['xgen'] = npvec(
            [(xl[i] + (xu[i] - xl[i]) * randcst())[0] for i in range(self.param['n'])])
        # raise Exception(xl, xu, self.info['xgen'])
    def _make_u(self):
        # Integer upper bounds for all (double bounded) y variables.
        self.u = randint(0, 10, self.info['md'])
| StarcoderdataPython |
1775622 | from controllers.main_controller import MainController
class MainMenu:
    """Console menu: reads an option from stdin and dispatches a handler."""

    # Maps a raw menu choice to the *name* of the handler that serves it.
    OPTION_MENU = {
        '1': "show_members",
        '2': "available_slots"
    }

    @classmethod
    def default_method(cls, *args, **kwargs):
        """Fallback handler used when the chosen option is unknown."""
        print('Help menu!')

    @classmethod
    def show_options(cls, current_user):
        """Prompt for an option and invoke the matching handler.

        Fixes two defects in the original: the first parameter was misspelled
        ``csl`` while the body referenced ``cls`` (NameError at runtime), and
        the value looked up in OPTION_MENU is a string, which the original
        then tried to call directly (TypeError). The string is now resolved
        to a callable on MainController, falling back to default_method.
        """
        option = input()
        handler_name = cls.OPTION_MENU.get(option)
        handler = (getattr(MainController, handler_name, cls.default_method)
                   if handler_name else cls.default_method)
        # TODO(review): confirm the handler signature on MainController; the
        # commented-out draft below called show_members(current_user).
        handler(current_user)
        # TODO decide what you want to do with this dict
        # if option == '1':
        #     members = MainController.show_members(current_user)
        #     cls._pretty_print_members(members)

    @classmethod
    def _pretty_print_members(cls, members):
        """Print one '<status> <username>' line per member."""
        for member in members:
            # NOTE(review): the original read ``getattr(member, "")``, which
            # always raises AttributeError; assuming a ``status`` attribute
            # was intended — confirm against the member model.
            print('{status} {username}'.format(status=getattr(member, "status", ""),
                                               username=member.username))
9694489 | # __init__.py
from .regression import Regression
from .classification import Classification | StarcoderdataPython |
9712663 | """Constants.
"""
# A constant value used to avoid division by zero, zero logarithms
# and any possible mathematical error
EPSILON = 1e-10
| StarcoderdataPython |
97124 | <reponame>J-Obog/market-simulator
from flask import request, jsonify
from app import db, bcrypt, cache, jwt
from api.accounts.model import Account, AccountSchema
from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, current_user
from datetime import timedelta
@jwt.token_in_blocklist_loader
def check_token_in_blacklist(_, jwt_payload):
    # A token counts as revoked iff its subject id has an entry in the cache
    # (written by logout below with a one hour TTL).
    return cache.get(jwt_payload['sub']) is not None
@jwt.user_lookup_loader
def user_lookup(_, jwt_payload):
    # Resolve the JWT subject claim to an Account row; None when not found.
    return Account.query.filter_by(id=jwt_payload['sub']).one_or_none()
""" Log a user out"""
@jwt_required()
def logout():
cache.set(current_user.id, '', ex=3600)
return jsonify(message='Logout successful')
""" Log a user in """
def login():
# request body vars
email = request.json.get('email')
password = <PASSWORD>('password')
# query for account with matching email
acc = Account.query.filter_by(email=email).first()
# validate if there's a match and the match shares the same password
if acc:
if bcrypt.check_password_hash(acc.password, password):
access_token = create_access_token(identity=acc.id, expires_delta=timedelta(hours=1))
refresh_token = create_refresh_token(identity=acc.id, expires_delta=timedelta(days=30))
return jsonify(access_token=access_token, refresh_token=refresh_token)
else:
return jsonify(message='Email and password must match'), 401
else:
return jsonify(message='No matching account for email'), 401
""" Sign user up """
def register_user():
# request body vars
email = request.json.get('email')
password = request.json.get('password')
# handling validation errors
try:
AccountSchema().load(request.json)
except Exception as e:
return jsonify(message=e.messages), 401
if Account.query.filter_by(email=email).first():
return jsonify(message={'email': ['Account with email already exists']}), 401
# loading user into db
acc = Account(email=email, password=bcrypt.generate_password_hash(password, 10).decode('utf-8'))
db.session.add(acc)
db.session.commit()
return jsonify(message='Registration successful') | StarcoderdataPython |
3344848 | <reponame>hyu-iot/gem5
#!/usr/bin/env python
# Copyright (c) 2020 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import datetime
import subprocess
import sys
import git_filter_repo
import update_copyright
# Command line interface. The description below is rendered verbatim
# (RawTextHelpFormatter), so the help string keeps its own layout.
parser = argparse.ArgumentParser(description=
"""Update copyright headers on files of a range of commits.
This can be used to easily update copyright headers at once on an entire
patchset before submitting.
Only files touched by the selected commits are updated.
Only existing copyrights for the selected holder are updated, new
notices are never automatically added if not already present.
The size of the changes is not taken into account, every touched file gets
updated. If you want to undo that for a certain file because the change to
it is trivial, you need to manually rebase and undo the copyright change
for that file.
Example usage with an organization alias such as `arm`:
```
python3 -m pip install --user --requirement \
gem5/util/update_copyright/requirements.txt
./update-copyright.py -o arm HEAD~3
```
The above would act on the 3 last commits (HEAD~2, HEAD~ and HEAD),
leaving HEAD~3 unchanged, and doing updates such as:
```
- * Copyright (c) 2010, 2012-2013, 2015,2017-2019 ARM Limited
+ * Copyright (c) 2010, 2012-2013, 2015,2017-2020 ARM Limited
```
If the organization is not in the alias list, you can also explicitly give
the organization string as in:
```
./update-copyright.py HEAD~3 'ARM Limited'
```
which is equivalent to the previous invocation.
""",
    formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument('start',
    nargs='?',
    help="The commit before the last commit to be modified")
parser.add_argument('org-string',
    nargs='?',
    help="Copyright holder name")
parser.add_argument('-o', '--org', choices=('arm',),
    help="Alias for known organizations")
args = parser.parse_args()
def error(msg):
    """Report *msg* on stderr with an ``error:`` prefix and exit with status 1."""
    print(f"error: {msg}", file=sys.stderr)
    sys.exit(1)
# The existing safety checks are too strict, so we just disable them
# with force, and do our own checks to not overwrite uncommitted changes.
# https://github.com/newren/git-filter-repo/issues/159
# subprocess.call returns git's exit status; `git diff --quiet` exits
# nonzero when differences exist, which aborts via error().
if subprocess.call(['git', 'diff', '--staged', '--quiet']):
    error("uncommitted changes")
if subprocess.call(['git', 'diff', '--quiet']):
    error("unstaged changes")
# Handle CLI arguments.
if args.start is None:
    error("the start argument must be given")
if args.org is None and getattr(args, 'org-string') is None:
    error("either --org or org-string must be given")
if args.org is not None and getattr(args, 'org-string') is not None:
    error("both --org and org-string given")
if args.org is not None:
    org_bytes = update_copyright.org_alias_map[args.org]
else:
    org_bytes = getattr(args, 'org-string').encode()
# Call git_filter_repo.
# Args deduced from:
# print(git_filter_repo.FilteringOptions.parse_args(['--refs', 'HEAD',
#     '--force'], error_on_empty=False))
filter_repo_args = git_filter_repo.FilteringOptions.default_options()
filter_repo_args.force = True
filter_repo_args.partial = True
filter_repo_args.refs = ['{}..HEAD'.format(args.start)]
filter_repo_args.repack=False
filter_repo_args.replace_refs='update-no-add'
def blob_callback(blob, callback_metadata, org_bytes):
    # Rewrite the blob's copyright notice for org_bytes, extending the year
    # range to the current year, in place.
    blob.data = update_copyright.update_copyright(blob.data,
        datetime.datetime.now().year, org_bytes)
# Run the filter over the selected ref range, applying blob_callback to
# every touched blob.
git_filter_repo.RepoFilter(
    filter_repo_args,
    blob_callback=lambda x, y: blob_callback( x, y, org_bytes)
).run()
| StarcoderdataPython |
293045 | <reponame>suwoongleekor/DRS
#!/usr/bin/env python
# coding: utf-8
#
# Author: <NAME>
# URL: https://kazuto1011.github.io
# Date: 07 January 2019
from __future__ import absolute_import, division, print_function
import random
import os
import argparse
import cv2
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import yaml
from addict import Dict
from PIL import Image
from libs.datasets import get_dataset
from libs.models import *
from libs.utils import PolynomialLR
from libs.utils.stream_metrics import StreamSegMetrics, AverageMeter
import pudb
def get_argparser():
    """Build the command-line parser for the training script."""
    argparser = argparse.ArgumentParser()

    # Dataset / logging options
    argparser.add_argument("--config_path", type=str, help="config file path")
    argparser.add_argument("--gt_path", type=str, help="gt label path")
    argparser.add_argument("--log_dir", type=str, help="training log path")
    # NOTE(review): argparse's type=bool treats any non-empty string as True
    # ("--cuda False" still yields True); kept as-is to preserve behavior.
    argparser.add_argument("--cuda", type=bool, default=True, help="GPU")
    argparser.add_argument("--random_seed", type=int, default=1, help="random seed (default: 1)")
    argparser.add_argument("--amp", action='store_true', default=False)
    argparser.add_argument("--val_interval", type=int, default=500, help="val_interval")
    return argparser
def makedirs(dirs):
    """Create directory *dirs* (including parents) if it does not exist.

    Uses ``exist_ok=True`` instead of the original ``os.path.exists`` guard,
    which had a check-then-create race (TOCTOU) between the test and the
    ``os.makedirs`` call.
    """
    os.makedirs(dirs, exist_ok=True)
def get_device(cuda):
    """Resolve the torch device to run on and print a short summary.

    Args:
        cuda: request CUDA; silently falls back to CPU when unavailable.

    Returns:
        torch.device: "cuda" when requested and available, else "cpu".
    """
    use_cuda = cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    if use_cuda:
        print("Device:")
        for gpu_index in range(torch.cuda.device_count()):
            print("    {}:".format(gpu_index), torch.cuda.get_device_name(gpu_index))
    else:
        print("Device: CPU")
    return device
def get_params(model, key):
    """Yield parameter groups for DeepLab's per-group learning rates.

    key == "1x":  all conv parameters of backbone modules ("layer*" names).
    key == "10x": conv weights of the ASPP head ("aspp" names).
    key == "20x": conv biases of the ASPP head ("aspp" names).

    Any other key yields nothing.
    """
    for module_name, module in model.named_modules():
        if not isinstance(module, nn.Conv2d):
            continue
        if key == "1x" and "layer" in module_name:
            # Backbone (dilated FCN) convs: both weights and biases.
            for param in module.parameters():
                yield param
        elif key == "10x" and "aspp" in module_name:
            yield module.weight
        elif key == "20x" and "aspp" in module_name:
            yield module.bias
def resize_labels(labels, size):
    """Nearest-neighbour resize of integer label maps to *size*.

    PIL's NEAREST resampling is used deliberately: according to the original
    author, other backends (F.interpolate / cv2.resize nearest) produce
    misaligned label grids for the 0.5x / 0.75x logits.

    Args:
        labels: batch of label tensors (iterable of 2-D tensors).
        size: (width, height) target size for PIL.

    Returns:
        torch.LongTensor of resized label maps.
    """
    resized = []
    for label_map in labels:
        as_image = Image.fromarray(label_map.float().numpy())
        shrunk = as_image.resize(size, resample=Image.NEAREST)
        resized.append(np.asarray(shrunk))
    return torch.LongTensor(resized)
def main():
    """Train DeepLabV2-ResNet101 (MSC) following the v2 protocol in CONFIG.

    Reads a YAML config selected on the command line, builds train/val
    datasets and loaders, restores the backbone from CONFIG.MODEL.INIT_MODEL,
    then runs SGD with a polynomial LR schedule.  Every ``val_interval``
    iterations the model is validated and the best mean-IoU checkpoint is
    saved under CONFIG.EXP.OUTPUT_DIR.
    """
    opts = get_argparser().parse_args()
    print(opts)
    # Setup random seed
    torch.manual_seed(opts.random_seed)
    np.random.seed(opts.random_seed)
    random.seed(opts.random_seed)
    """
    Training DeepLab by v2 protocol
    """
    # Configuration
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted files; yaml.safe_load would be preferable.
    with open(opts.config_path) as f:
        CONFIG = Dict(yaml.load(f))
    device = get_device(opts.cuda)
    torch.backends.cudnn.benchmark = True
    # pu.db
    # Dataset
    train_dataset = get_dataset(CONFIG.DATASET.NAME)(
        root=CONFIG.DATASET.ROOT,
        split=CONFIG.DATASET.SPLIT.TRAIN,
        ignore_label=CONFIG.DATASET.IGNORE_LABEL,
        mean_bgr=(CONFIG.IMAGE.MEAN.B, CONFIG.IMAGE.MEAN.G, CONFIG.IMAGE.MEAN.R),
        augment=True,
        base_size=CONFIG.IMAGE.SIZE.BASE,
        crop_size=CONFIG.IMAGE.SIZE.TRAIN,
        scales=CONFIG.DATASET.SCALES,
        flip=True,
        gt_path=opts.gt_path,
    )
    print(train_dataset)
    print()
    valid_dataset = get_dataset(CONFIG.DATASET.NAME)(
        root=CONFIG.DATASET.ROOT,
        split=CONFIG.DATASET.SPLIT.VAL,
        ignore_label=CONFIG.DATASET.IGNORE_LABEL,
        mean_bgr=(CONFIG.IMAGE.MEAN.B, CONFIG.IMAGE.MEAN.G, CONFIG.IMAGE.MEAN.R),
        augment=False,
        gt_path="SegmentationClassAug",
    )
    print(valid_dataset)
    # DataLoader
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=CONFIG.SOLVER.BATCH_SIZE.TRAIN,
        num_workers=CONFIG.DATALOADER.NUM_WORKERS,
        shuffle=True,
    )
    valid_loader = torch.utils.data.DataLoader(
        dataset=valid_dataset,
        batch_size=CONFIG.SOLVER.BATCH_SIZE.TEST,
        num_workers=CONFIG.DATALOADER.NUM_WORKERS,
        shuffle=False,
    )
    # Model check
    print("Model:", CONFIG.MODEL.NAME)
    assert (
        CONFIG.MODEL.NAME == "DeepLabV2_ResNet101_MSC"
    ), 'Currently support only "DeepLabV2_ResNet101_MSC"'
    # Model setup
    # NOTE(review): eval() on a config-supplied name executes arbitrary code;
    # acceptable for trusted configs, but a lookup table would be safer.
    model = eval(CONFIG.MODEL.NAME)(n_classes=CONFIG.DATASET.N_CLASSES)
    print("    Init:", CONFIG.MODEL.INIT_MODEL)
    state_dict = torch.load(CONFIG.MODEL.INIT_MODEL, map_location='cpu')
    for m in model.base.state_dict().keys():
        if m not in state_dict.keys():
            print("    Skip init:", m)
    model.base.load_state_dict(state_dict, strict=False)  # to skip ASPP
    model = nn.DataParallel(model)
    model.to(device)
    # Loss definition
    criterion = nn.CrossEntropyLoss(ignore_index=CONFIG.DATASET.IGNORE_LABEL)
    criterion.to(device)
    # Optimizer
    optimizer = torch.optim.SGD(
        # cf lr_mult and decay_mult in train.prototxt
        params=[
            {
                "params": get_params(model.module, key="1x"),
                "lr": CONFIG.SOLVER.LR,
                "weight_decay": CONFIG.SOLVER.WEIGHT_DECAY,
            },
            {
                "params": get_params(model.module, key="10x"),
                "lr": 10 * CONFIG.SOLVER.LR,
                "weight_decay": CONFIG.SOLVER.WEIGHT_DECAY,
            },
            {
                "params": get_params(model.module, key="20x"),
                "lr": 20 * CONFIG.SOLVER.LR,
                "weight_decay": 0.0,
            },
        ],
        momentum=CONFIG.SOLVER.MOMENTUM,
    )
    # Learning rate scheduler
    scheduler = PolynomialLR(
        optimizer=optimizer,
        step_size=CONFIG.SOLVER.LR_DECAY,
        iter_max=CONFIG.SOLVER.ITER_MAX,
        power=CONFIG.SOLVER.POLY_POWER,
    )
    # Path to save models
    checkpoint_dir = os.path.join(
        CONFIG.EXP.OUTPUT_DIR,
        "models",
        opts.log_dir,
        CONFIG.MODEL.NAME.lower(),
        CONFIG.DATASET.SPLIT.TRAIN,
    )
    makedirs(checkpoint_dir)
    print("Checkpoint dst:", checkpoint_dir)
    model.train()
    metrics = StreamSegMetrics(CONFIG.DATASET.N_CLASSES)
    scaler = torch.cuda.amp.GradScaler(enabled=opts.amp)
    avg_loss = AverageMeter()
    avg_time = AverageMeter()
    curr_iter = 0
    best_score = 0
    end_time = time.time()
    # Infinite epoch loop; termination happens via the ITER_MAX check below.
    while True:
        for _, images, labels, cls_labels in train_loader:
            # print(images[0, :, :, :].shape)
            # images_np = images[0, :, :, :].cpu().numpy().transpose(1, 2, 0) # make sure tensor is on cpu
            # labels_np = labels[0, :, :].cpu().numpy()  # make sure tensor is on cpu
            # print(labels_np)
            # cv2.imwrite("image.png", images_np)
            # cv2.imwrite("label.png", labels_np)
            # pu.db
            curr_iter += 1
            loss = 0
            optimizer.zero_grad()
            with torch.cuda.amp.autocast(enabled=opts.amp):
                # Propagate forward
                model.train()
                logits = model(images.to(device))
                # Loss
                for logit in logits:
                    # Resize labels for {100%, 75%, 50%, Max} logits
                    _, _, H, W = logit.shape
                    labels_ = resize_labels(labels, size=(H, W))
                    # Pseudo labels: mask logits by image-level class labels,
                    # then take the per-pixel argmax as a second supervision.
                    pseudo_labels = logit.detach() * cls_labels[:, :, None, None].to(device)
                    pseudo_labels = pseudo_labels.argmax(dim=1)
                    _loss = criterion(logit, labels_.to(device)) + criterion(logit, pseudo_labels)
                    loss += _loss
            # Propagate backward (just compute gradients wrt the loss)
            loss = (loss / len(logits))
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
            # Update learning rate
            scheduler.step()
            avg_loss.update(loss.item())
            avg_time.update(time.time() - end_time)
            end_time = time.time()
            # TensorBoard
            if curr_iter % 10 == 0:
                print("  Itrs %d/%d, Loss=%6f, Time=%.2f , LR=%.8f" %
                      (curr_iter, CONFIG.SOLVER.ITER_MAX,
                       avg_loss.avg, avg_time.avg*1000, optimizer.param_groups[0]['lr']))
            # validation
            if curr_iter % opts.val_interval == 0:
                print("... validation")
                model.eval()
                metrics.reset()
                with torch.no_grad():
                    for _, images, labels, _ in valid_loader:
                        images = images.to(device)
                        # Forward propagation
                        logits = model(images)
                        # Pixel-wise labeling
                        _, H, W = labels.shape
                        # print(logits)
                        logits = F.interpolate(logits, size=(H, W),
                                               mode="bilinear", align_corners=False)
                        preds = torch.argmax(logits, dim=1).cpu().numpy()
                        targets = labels.cpu().numpy()
                        metrics.update(targets, preds)
                score = metrics.get_results()
                print(metrics.to_str(score))
                if score['Mean IoU'] > best_score:  # save best model
                    best_score = score['Mean IoU']
                    torch.save(
                        model.module.state_dict(), os.path.join(checkpoint_dir, "checkpoint_best.pth")
                    )
            if curr_iter > CONFIG.SOLVER.ITER_MAX:
                return
# Script entry point: parse CLI options and run the training loop.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
12827530 | <filename>docsim/utils/text.py
from collections import Counter
import re
from typing import List, Pattern, Set
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# Matches tokens made up entirely of non-[a-z] characters; callers must
# lower-case their text first (see remove_nonalphabet_tokens' docstring).
pat_token_non_alphabet: Pattern = re.compile('^[^a-z]+$')
# English stopword set from NLTK (requires the 'stopwords' corpus download).
stop_words: Set[str] = set(stopwords.words('english'))
def tokenize(text: str) -> List[str]:
    """Split *text* into word tokens via NLTK's word_tokenize."""
    tokens = word_tokenize(text)
    return tokens
def remove_stopwords(tokens: List[str]) -> List[str]:
    """Drop English stopwords, preserving the order of the remaining tokens."""
    return [token for token in tokens if token not in stop_words]
def remove_nonalphabet_tokens(tokens: List[str]) -> List[str]:
    """Drop tokens that contain no a-z character at all.

    The input must already be lower-cased ("it requires `lower` in
    advance"), since the module-level pattern only knows the lowercase
    alphabet.
    """
    kept = []
    for token in tokens:
        if pat_token_non_alphabet.match(token) is None:
            kept.append(token)
    return kept
def extract_toptf_tokens(tokens: List[str],
                         n_words: int) -> List[str]:
    """Return the *n_words* highest term-frequency tokens, most frequent first."""
    frequencies = Counter(tokens)
    top_entries = frequencies.most_common(n_words)
    return [token for token, _count in top_entries]
| StarcoderdataPython |
class Solution:
    def nextPermutation(self, nums: List[int]) -> None:
        """Rearrange *nums* in place into its next lexicographic permutation.

        Wraps around to the smallest ordering when *nums* is already the
        largest permutation.  Does not return anything.
        """
        size = len(nums)
        if size <= 1:
            return
        # Find the rightmost position whose value is smaller than its successor.
        pivot = size - 2
        while pivot >= 0 and nums[pivot] >= nums[pivot + 1]:
            pivot -= 1
        if pivot == -1:
            # Whole array is non-increasing: wrap to the smallest ordering.
            nums.reverse()
            return
        # Rightmost element strictly greater than the pivot value; swap them.
        successor = size - 1
        while successor >= pivot and nums[pivot] >= nums[successor]:
            successor -= 1
        nums[pivot], nums[successor] = nums[successor], nums[pivot]
        # Reverse the suffix so the tail is minimal.
        nums[pivot + 1:] = nums[pivot + 1:][::-1]
1720256 | <gh_stars>0
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource property Get."""
import json
def _GetMetaDict(items, key, value):
"""Gets the dict in items that contains key==value.
A metadict object is a list of dicts of the form:
[
{key: value-1, ...},
{key: value-2, ...},
...
]
Args:
items: A list of dicts.
key: The dict key name.
value: The dict key value.
Returns:
The dict in items that contains key==value or None if no match.
"""
try:
for item in items:
if item.get(key) == value:
return item
except (AttributeError, IndexError, TypeError, ValueError):
pass
return None
def _GetMetaDataValue(items, name, deserialize=False):
  """Return the metadata value stored under key == *name*.

  A metadata object is a list of dicts of the form:
    [
      {'key': key-name-1, 'value': field-1-value-string},
      {'key': key-name-2, 'value': field-2-value-string},
      ...
    ]

  Examples:
    x.metadata[windows-keys].email
      Deserializes the 'windows-keys' metadata value and gets email.
    x.metadata[windows-keys]
      Gets the 'windows-key' metadata string value.
    x.metadata[windows-keys][]
      Gets the deserialized 'windows-key' metadata value.

  Args:
    items: The metadata items list.
    name: The metadata name (matched against the 'key' entries).
    deserialize: If True, attempt to JSON-decode the value; on decode
      failure the raw string is returned unchanged.

  Returns:
    The value for *name*, or None when it is absent or *items* is not a
    metadata dict list.
  """
  entry = _GetMetaDict(items, 'key', name)
  if entry is None:
    return None
  raw = entry.get('value', None)
  if not deserialize:
    return raw
  try:
    return json.loads(raw)
  except (TypeError, ValueError):
    return raw
def Get(resource, key, default=None):
  """Gets the value referenced by key in the object resource.

  Since it is common for resource instances to be sparse it is not an error if
  a key is not present in a particular resource instance, or if an index does
  not match the resource type.

  Args:
    resource: The resource object possibly containing a value for key.
    key: Ordered list of key names/indices, applied left to right. Each
      element in the list may be one of:
        str - A resource property name. This could be a class attribute name or
          a dict index.
        int - A list index. Selects one member is the list. Negative indices
          count from the end of the list, starting with -1 for the last element
          in the list. An out of bounds index is not an error; it produces the
          value None.
        None - A list slice. Selects all members of a list or dict like object.
          A slice of an empty dict or list is an empty dict or list.
    default: Get() returns this value if key is not in resource.

  Returns:
    The value, None if any of the given keys are not found. This is
    intentionally not an error. In this context a value can be any data
    object: dict, list, tuple, class, str, int, float, ...
  """
  # NOTE(review): this function relies on Python-2-only builtins
  # (iteritems, basestring, long, xrange); it will not run unmodified on
  # Python 3.
  if isinstance(resource, set):
    resource = sorted(resource)
  # 'meta' carries a pending metadict lookup name from one iteration to the
  # next (set in the list-like branch, consumed by the 'elif meta' branch).
  meta = None
  for i, index in enumerate(key):
    # This if-ladder ordering checks builtin object attributes last. For
    # example, with resource = {'items': ...}, Get() treats 'items' as a dict
    # key rather than the builtin 'items' attribute of resource.
    if resource is None:
      # None is different than an empty dict or list.
      return default
    elif meta:
      # Resolve the deferred metadict lookup queued by the previous index.
      resource = _GetMetaDict(resource, meta, index)
      meta = None
    elif hasattr(resource, 'iteritems'):
      # dict-like
      if index is None:
        if i + 1 < len(key):
          # Inner slice: *.[].*
          return [Get(resource, [k] + key[i + 1:], default) for k in resource]
        else:
          # Trailing slice: *.[]
          return resource
      elif index in resource:
        resource = resource[index]
      elif 'items' in resource:
        # It would be nice if there were a better metadata indicator.
        # _GetMetaDataValue() returns None if resource['items'] isn't really
        # metadata, so there is a bit more verification than just 'items' in
        # resource.
        resource = _GetMetaDataValue(
            resource['items'], index, deserialize=i + 1 < len(key))
      else:
        return default
    elif isinstance(index, basestring) and hasattr(resource, index):
      # class-like
      resource = getattr(resource, index, default)
    elif hasattr(resource, '__iter__') or isinstance(resource, basestring):
      # list-like
      if index is None:
        if i + 1 < len(key):
          # Inner slice: *.[].*
          return [Get(resource, [k] + key[i + 1:], default)
                  for k in range(len(resource))]
        else:
          # Trailing slice: *.[]
          return resource
      elif not isinstance(index, (int, long)):
        if (isinstance(index, basestring) and
            isinstance(resource, list) and
            len(resource) and
            isinstance(resource[0], dict)):
          # Let the next iteration check for a meta dict.
          meta = index
          continue
        # Index mismatch.
        return default
      elif index in xrange(-len(resource), len(resource)):
        # Bounds check; out-of-range indices deliberately yield the default.
        resource = resource[index]
      else:
        return default
    else:
      # Resource or index mismatch.
      return default
  if isinstance(resource, set):
    resource = sorted(resource)
  return resource
def IsListLike(resource):
  """Report whether *resource* behaves like a list.

  True for real lists, and for objects exposing both ``__iter__`` and
  ``next`` -- the Python 2 iterator protocol.  Python 3 iterators define
  ``__next__`` instead and are therefore not matched by this check.

  Args:
    resource: The object to check.

  Returns:
    True if resource is a list-like iterable object.
  """
  if isinstance(resource, list):
    return True
  return hasattr(resource, '__iter__') and hasattr(resource, 'next')
| StarcoderdataPython |
6580221 | <filename>util/filter_buscos.py
#!/usr/bin/env python
#script to reformat Augustus BUSCO results
import sys, os, itertools
# Require three positional arguments (script name + 3 == 4 argv entries).
if len(sys.argv) < 4:
    print("Usage: filter_buscos.py busco.evm.gff3 full_table_species busco.final.gff3")
    sys.exit(1)
def group_separator(line):
    """Return True when *line* is a blank line (used to split GFF3 gene blocks)."""
    is_blank = (line == '\n')
    return is_blank
# NOTE(review): this script uses Python 2 print statements and the legacy
# 'rU' open mode; it must be run under Python 2.
#parse the busco table into dictionary format
# busco_complete maps BUSCO gene ID -> (busco_name, best_score); when an ID
# repeats, only the higher-scoring entry is kept.
busco_complete = {}
with open(sys.argv[2], 'rU') as buscoinput:
    for line in buscoinput:
        cols = line.split('\t')
        if cols[1] == 'Complete':
            ID = cols[2].replace('evm.model.', '')
            if not ID in busco_complete:
                busco_complete[ID] = (cols[0], cols[3])
            else:
                score = busco_complete.get(ID)[1]
                if float(cols[3]) > float(score):
                    busco_complete[ID] = (cols[0], cols[3])
                    print ID, 'updated dictionary'
                else:
                    print ID, 'is repeated and score is less'
#now parse the evm busco file, group them
# Gene models in the EVM GFF3 are separated by blank lines; groupby with
# group_separator collects each non-blank run of lines as one model block.
results = []
with open(sys.argv[1]) as f:
    for key, group in itertools.groupby(f, group_separator):
        if not key:
            results.append(list(group))
#loop through each gene model, lookup the BUSCO name, and then replace the name with counter based and busco model name
'''
scaffold_1      EVM     gene    18407   18947   .       -       .       ID=evm.TU.scaffold_1.1;Name=EVM%20prediction%20scaffold_1.1
scaffold_1      EVM     mRNA    18407   18947   .       -       .       ID=evm.model.scaffold_1.1;Parent=evm.TU.scaffold_1.1;Name=EVM%20prediction%20scaffold_1.1
scaffold_1      EVM     exon    18772   18947   .       -       .       ID=evm.model.scaffold_1.1.exon1;Parent=evm.model.scaffold_1.1
scaffold_1      EVM     CDS     18772   18947   .       -       0       ID=cds.evm.model.scaffold_1.1;Parent=evm.model.scaffold_1.1
scaffold_1      EVM     exon    18407   18615   .       -       .       ID=evm.model.scaffold_1.1.exon2;Parent=evm.model.scaffold_1.1
scaffold_1      EVM     CDS     18407   18615   .       -       1       ID=cds.evm.model.scaffold_1.1;Parent=evm.model.scaffold_1.1
'''
counter = 0
with open(sys.argv[3], 'w') as output:
    for i in results:
        counter += 1
        # First line of the block is the 'gene' feature; column 9 holds the
        # attributes, whose first field is 'ID=evm.TU.<locus>'.
        cols = i[0].split('\t')
        ID = cols[8].split(';')[0]
        ID = ID.replace('ID=', '')
        lookup = ID.replace('evm.TU.', '')
        if lookup in busco_complete:
            name = busco_complete.get(lookup)[0]
            geneID = 'gene'+str(counter)
            mrnaID = 'mrna'+str(counter)
            # Rewrite the whole block text: human-readable name first, then
            # the gene and mRNA identifiers.
            newblock = ''.join(i)
            newblock = newblock.replace('EVM%20prediction%20'+lookup, name)
            newblock = newblock.replace(ID, geneID)
            newblock = newblock.replace('evm.model.'+lookup, mrnaID)
            output.write(newblock+'\n')
46838 | <reponame>vidma/kensu-py
# coding: utf-8
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: beta
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
class FieldDef(object):
    """Swagger model describing a single field definition.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'name': 'str',
        'field_type': 'str',
        'nullable': 'bool'
    }
    attribute_map = {
        'name': 'name',
        'field_type': 'fieldType',
        'nullable': 'nullable'
    }
    def __init__(self, name=None, field_type=None, nullable=None):
        """
        FieldDef - a model defined in Swagger.

        All three attributes are required: the property setters below raise
        ValueError on None, so the keyword defaults exist only to satisfy the
        generated deserializer's calling convention.
        """
        self._name = None
        self._field_type = None
        self._nullable = None
        self.name = name
        self.field_type = field_type
        self.nullable = nullable
    @property
    def name(self):
        """
        Gets the name of this FieldDef.

        :return: The name of this FieldDef.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """
        Sets the name of this FieldDef.

        :param name: The name of this FieldDef.
        :type: str
        :raises ValueError: if name is None (required property).
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._name = name
    @property
    def field_type(self):
        """
        Gets the field_type of this FieldDef.

        :return: The field_type of this FieldDef.
        :rtype: str
        """
        return self._field_type
    @field_type.setter
    def field_type(self, field_type):
        """
        Sets the field_type of this FieldDef.

        :param field_type: The field_type of this FieldDef.
        :type: str
        :raises ValueError: if field_type is None (required property).
        """
        if field_type is None:
            raise ValueError("Invalid value for `field_type`, must not be `None`")
        self._field_type = field_type
    @property
    def nullable(self):
        """
        Gets the nullable of this FieldDef.

        :return: The nullable of this FieldDef.
        :rtype: bool
        """
        return self._nullable
    @nullable.setter
    def nullable(self, nullable):
        """
        Sets the nullable of this FieldDef.

        :param nullable: The nullable of this FieldDef.
        :type: bool
        :raises ValueError: if nullable is None (required property).
        """
        if nullable is None:
            raise ValueError("Invalid value for `nullable`, must not be `None`")
        self._nullable = nullable
    def to_dict(self):
        """
        Returns the model properties as a dict, recursively converting any
        nested models (objects exposing to_dict) inside lists and dicts.
        """
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Value equality over the full instance state; non-FieldDef operands
        # compare unequal rather than raising.
        if not isinstance(other, FieldDef):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| StarcoderdataPython |
3201518 | # Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import json
import os
from contextlib import contextmanager
from newrelic.api.application import (application_instance as
current_application)
from newrelic.api.background_task import BackgroundTask
from newrelic.core.rules_engine import SegmentCollapseEngine
from newrelic.core.agent import agent_instance
# Directory containing this test module.
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
# Cross-agent JSON fixtures live in 'fixtures/' next to the tests.
JSON_DIR = os.path.normpath(os.path.join(CURRENT_DIR, 'fixtures'))
OUTBOUD_REQUESTS = {}  # NOTE(review): appears unused in this file and the name looks misspelled ('OUTBOUND'); confirm before removing.
# Fixture keys extracted, in order, as pytest parameters.
_parameters_list = ['testname', 'transaction_segment_terms', 'tests']
def load_tests():
    """Load the segment-term fixture file as a list of parameter tuples.

    Each tuple follows _parameters_list order; missing keys become None.
    """
    fixture_path = os.path.join(JSON_DIR, 'transaction_segment_terms.json')
    with open(fixture_path, 'r') as fixture_file:
        tests = json.load(fixture_file)
    return [tuple(test.get(param, None) for param in _parameters_list)
            for test in tests]
# Comma-joined parameter names as expected by pytest.mark.parametrize.
_parameters = ",".join(_parameters_list)
@pytest.mark.parametrize(_parameters, load_tests())
def test_transaction_segments(testname, transaction_segment_terms, tests):
    """Each fixture input must normalize to its expected segment string."""
    engine = SegmentCollapseEngine(transaction_segment_terms)
    for case in tests:
        normalized = engine.normalize(case['input'])[0]
        assert normalized == case['expected']
@contextmanager
def segment_rules(name, rules):
    """Temporarily install *rules* as the segment collapse engine for *name*.

    The previous engine is restored on exit.  The yield is wrapped in
    try/finally so restoration also happens when the with-body raises; the
    original implementation skipped restoration on exceptions, leaking the
    test rules into subsequent tests.
    """
    application = agent_instance().application(name)
    old_rules = application._rules_engine['segment']
    application._rules_engine['segment'] = SegmentCollapseEngine(rules)
    try:
        yield
    finally:
        application._rules_engine['segment'] = old_rules
@pytest.mark.parametrize(_parameters, load_tests())
def test_transaction_freeze_path_segments(testname, transaction_segment_terms,
        tests):
    """Frozen transaction paths must honor the segment collapse rules.

    We can't check all possibilities via a real transaction: a metric path
    of only one segment can't be set up this way, so those fixture cases
    are skipped.
    """
    application = current_application()
    with segment_rules(application.name, transaction_segment_terms):
        for case in tests:
            parts = case['input'].split()
            if len(parts) < 2:
                continue
            transaction_type = parts[0]
            group_part = '/'.join(parts[1:2])
            name_part = '/'.join(parts[2:])
            with BackgroundTask(application, name_part, group_part) as txn:
                txn.background_task = (transaction_type == 'OtherTransaction')
                assert txn.path == case['expected']
| StarcoderdataPython |
347405 | <filename>enum2magic.py
#!/usr/bin/python
#
# @author: <NAME>
# @version: 1.9.5
# @date: 2015-01-29
import sys
class Parser(object):
    """ This parser converts C-Enum syntax to Magic Number syntax.

    A small state machine: '---'-delimited header sections configure the
    output (target file name, line template, prefix to strip), and each
    '{'..'}' region is treated as an enum body whose members are emitted
    with explicit values.  NOTE(review): this file is Python 2 only
    ('except Exception, e' syntax and print statements).
    """
    # State-machine states.
    IDLE = 0
    BEGIN_HEADER = 1
    PARSING_HEADER = 2
    BEGIN_ENUM = 3
    PARSING_ENUM = 4
    def __init__(self, filename):
        super(Parser, self).__init__()
        self.current_state = Parser.IDLE
        self.filename = filename
        # Running enum value; reset/overridden by explicit '= N' members.
        self.counter = 0
        # Output file handle, opened lazily once 'target_name' is known.
        self.target_file = None
        # Maps each state to the handler fired while in that state.
        self.event_table = {
            Parser.IDLE             : self.on_idle,
            Parser.BEGIN_HEADER     : self.on_begin_header,
            Parser.PARSING_HEADER   : self.on_header,
            Parser.BEGIN_ENUM       : self.on_begin_enum,
            Parser.PARSING_ENUM     : self.on_enum
        }
        # User-overridable settings, filled in from the header section.
        self.config = {
            'target_name'    : '',
            'target_template': '{element} = {value}',
            'ignore_prefix'  : '',
        }
    def _transition(self, line):
        # Advance the state machine according to the current line.
        state = self.current_state
        if state == Parser.IDLE and line.startswith('---'):
            self.current_state = Parser.BEGIN_HEADER
        elif (state == Parser.PARSING_HEADER or state == Parser.BEGIN_HEADER) and line.startswith('---'):
            self.current_state = Parser.IDLE
        elif state == Parser.BEGIN_HEADER:
            self.current_state = Parser.PARSING_HEADER
        elif state == Parser.IDLE and line.find('{') > -1:
            self.current_state = Parser.BEGIN_ENUM
        elif (state == Parser.PARSING_ENUM or state == Parser.BEGIN_ENUM) and line.find('}') > -1:
            self.current_state = Parser.IDLE
        elif state == Parser.BEGIN_ENUM:
            self.current_state = Parser.PARSING_ENUM
        else:
            pass
    def _fire_events(self, line):
        # Dispatch the line to the current state's handler; handler errors
        # are reported but do not abort the parse.
        try:
            self.event_table[self.current_state](line)
        except Exception, e:
            print '*** WARNING:' , e , ' ***'
    def on_idle(self, line):
        # Nothing to do outside header/enum sections.
        pass
    def on_begin_header(self, line):
        print '- Reading Config'
    def on_header(self, line):
        # Parse 'key : value' header lines into self.config.
        # print '%-35s => on_header' % line
        colon_ind = line.find(':')
        if colon_ind < 0: return
        key = line[:colon_ind].strip()
        value = line[colon_ind+1:].strip()
        if key in self.config:
            self.config[key] = value
            print '-- {key:<15} -> {value}'.format(key=key, value=value)
    def _get_target_file(self):
        # Lazily open the output file once a target name is configured.
        if self.target_file == None and self.config['target_name']:
            self.target_file = open(self.config['target_name'], 'w')
        return self.target_file
    @staticmethod
    def _break_down(line):
        # Split an enum member line into element name, explicit value (if
        # any) and trailing '//' comment.
        value = None
        comment = ''
        # get comment
        comment_ind = line.find('//')
        if comment_ind > -1:
            comment = line[comment_ind:]
            line = line[:comment_ind]
            line = line.strip(', ')
        # get value
        equal_ind = line.find('=')
        if equal_ind > -1:
            value = int(line[equal_ind+1:])
            line = line[:equal_ind]
            line = line.strip(', ')
        # get content
        return {'element': line,
                'value'  : value,
                'comment': comment}
    def _render(self, **kwargs):
        # Format one output line via the configured template.
        return self.config['target_template'].format(**kwargs)
    def on_begin_enum(self, line):
        print '- Reading Enum'
    def on_enum(self, line):
        f = self._get_target_file()
        if f :
            pack = Parser._break_down(line)
            # NOTE(review): str.lstrip strips any of these CHARACTERS, not
            # the prefix string -- e.g. ignore_prefix 'ENUM_' would also eat
            # a leading 'N' or 'M' of the member name. Likely a bug; a
            # startswith/slice removal was probably intended. Confirm.
            pack['element'] = pack['element'].lstrip(self.config['ignore_prefix'])
            self.counter = self.counter if pack['value'] is None else pack['value']
            if pack['element']:
                data = self._render(element = pack['element'],\
                                    value   = self.counter,\
                                    comment = pack['comment'])
                self.counter += 1
            else:
                data = pack['comment']
            f.write(data + '\n')
    def parse(self):
        # Drive the state machine over every (stripped) input line, then
        # close the output file if one was opened.
        with open(self.filename) as f:
            for line in f:
                line = line.strip()
                self._transition(line)
                self._fire_events(line)
                # print '%-35s => state: %s' % (line, self.current_state)
        if self.target_file:
            self.target_file.close()
# Script entry point (Python 2 only). The input file name defaults to
# 'sample' and may be overridden by a single CLI argument.
if __name__ == '__main__':
    fname = 'sample'
    if len(sys.argv) == 2: fname = sys.argv[1]
    try:
        print '=== Begin Parsing: %s ===' % fname
        Parser(fname).parse()
    except Exception, e:
        print '*** ERROR:' , e , ' ***'
    finally:
        print '=== End Parsing: %s ===' % fname
| StarcoderdataPython |
5080085 | #Part of Guess the Book project by <NAME>. MIT Licence 2019.
# https://github.com/dmitry-dereshev/guess-the-book-public
#The script takes in a top-level folder with .txt files, and transforms
#them into a Pandas DataFrame of symbol frequencies. The DataFrame is
#then saves as a .csv.
import pandas as pd
import os
def frequentize(path_to_txt, path_for_csv):
    """Build a per-file character-frequency table and save it as CSV.

    Walks *path_to_txt* recursively (note: ALL files found are processed,
    not only .txt), counts every character of each file -- line endings and
    trailing whitespace are excluded by rstrip -- and writes a DataFrame
    with one row per file and the file basename in a leading 'ID' column
    to *path_for_csv*.
    """
    # Creates a list of all paths to files under the tree.
    txt_files = [os.path.join(root, name)
                 for root, dirs, files in os.walk(path_to_txt)
                 for name in files]
    temp_store = []  # one {char: count} dict (plus 'ID') per file
    len_data = len(txt_files)
    counter = 0
    for path in txt_files:
        counter += 1
        # 'with' closes the handle promptly -- the original leaked an open
        # file per input; undecodable bytes are skipped rather than fatal.
        with open(path, errors='ignore') as handle:
            stringify = ''.join(line.rstrip() for line in handle)
        res = {"ID": os.path.basename(path)}
        # Keys other than 'ID' are single characters, so no collision with
        # the two-character 'ID' key is possible.
        for ch in stringify:
            res[ch] = res.get(ch, 0) + 1
        temp_store.append(res)
        print("processing %s of %s through temp_store" % (counter, len_data))
    print("assembling temp_res into a dataframe...")
    # Turns a list of dictionaries into a Pandas DataFrame with 'ID' first.
    symbols_assemble = pd.DataFrame(temp_store)
    id_column = symbols_assemble["ID"]
    symbols_assemble.drop(labels=['ID'], axis=1, inplace=True)
    symbols_assemble.insert(0, 'ID', id_column)
    print("saving everything into a csv...")
    symbols_assemble.to_csv(path_for_csv)
# Top-level folder with .txt files
# The script checks for all subfolders as well.
# NOTE(review): both paths below are placeholders and must be replaced with
# real locations before running this script.
path_to_txt = 'top/folder/with/.txt/files'
path_for_csv = 'where/you/want/the/resulting/table.csv'
frequentize(path_to_txt, path_for_csv)
8035197 | import os
import sys
import time
sys.path.append('../DDSP/')
from DDSP import DDSP
# Script entry point: create a DDSP publisher from two CLI arguments and
# publish one 64-byte payload.
if __name__ == '__main__':
    ddsp = DDSP(sys.argv[1], sys.argv[2])
    # Sleeps presumably allow peer discovery before/propagation after the
    # publish -- TODO confirm against the DDSP implementation.
    time.sleep(3)
    ddsp.addContent(b'123456789012345678901234567890123456789012345678901234567890abcd')
    time.sleep(10)
    print ("Reached the end of Publisher.py")
    # Hard-exits with status 1, skipping interpreter cleanup and any
    # DDSP shutdown handlers.
    os._exit(1)
1802278 | <reponame>atombrella/auditwheel<gh_stars>0
from subprocess import check_call, check_output, CalledProcessError
import pytest
import os
import os.path as op
import tempfile
import shutil
import warnings
from distutils.spawn import find_executable
# Echo every docker command and its output to stdout.
VERBOSE = True
ENCODING = 'utf-8'
# Build/repair container: the official manylinux1 image.
MANYLINUX_IMAGE_ID = 'quay.io/pypa/manylinux1_x86_64'
DOCKER_CONTAINER_NAME = 'auditwheel-test-manylinux'
# Fresh-userland container used to verify the repaired wheel installs/runs.
PYTHON_IMAGE_ID = 'python:3.5'
# PATH inside the manylinux container: puts the cp35 interpreter and the
# devtoolset compilers ahead of the system binaries.
PATH = ('/opt/python/cp35-cp35m/bin:/opt/rh/devtoolset-2/root/usr/bin:'
        '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin')
# Host-side cache so slow wheel builds are reused across test runs.
WHEEL_CACHE_FOLDER = op.expanduser('~/.cache/auditwheel_tests')
ORIGINAL_NUMPY_WHEEL = 'numpy-1.11.0-cp35-cp35m-linux_x86_64.whl'
ORIGINAL_SIX_WHEEL = 'six-1.11.0-py2.py3-none-any.whl'
def find_src_folder():
    """Return the auditwheel source checkout root, or None when not found.

    The root is taken to be the parent directory of this test file; it
    qualifies only if it contains both 'setup.py' and the 'auditwheel'
    package directory.
    """
    candidate = op.abspath(op.join(op.dirname(__file__), '..'))
    entries = set(os.listdir(candidate))
    if {'setup.py', 'auditwheel'} <= entries:
        return candidate
    return None
def docker_start(image, volumes={}, env_variables={}):
    """Start *image* running an idle sleep and return its container id.

    The image is pulled first so the latest published version is used.
    Volumes are given as {guest_path: host_path} and environment variables
    as {name: value}.  The returned id is suitable for 'docker exec'.

    Note: the mutable default arguments are only iterated, never mutated,
    so the shared-default pitfall does not apply here.
    """
    pull_cmd = ['docker', 'pull', image]
    if VERBOSE:
        print("$ " + " ".join(pull_cmd))
    pull_output = check_output(pull_cmd).decode(ENCODING).strip()
    if VERBOSE:
        print(pull_output)
    run_cmd = ['docker', 'run', '-d']
    for guest_path, host_path in sorted(volumes.items()):
        run_cmd += ['-v', '%s:%s' % (host_path, guest_path)]
    for name, value in sorted(env_variables.items()):
        run_cmd += ['-e', '%s=%s' % (name, value)]
    run_cmd += [image, 'sleep', '10000']
    if VERBOSE:
        print("$ " + " ".join(run_cmd))
    return check_output(run_cmd).decode(ENCODING).strip()
def docker_exec(container_id, cmd):
    """Run *cmd* inside the running container and return its decoded output.

    *cmd* may be a pre-split argument list or a single whitespace-separated
    command string.
    """
    args = cmd.split() if isinstance(cmd, str) else cmd
    full_cmd = ['docker', 'exec', container_id] + args
    if VERBOSE:
        print("$ " + " ".join(full_cmd))
    result = check_output(full_cmd).decode(ENCODING)
    if VERBOSE:
        print(result)
    return result
@pytest.yield_fixture
def docker_container():
    """Yield (manylinux_id, python_id, io_folder) for wheel build/repair tests.

    Skips when docker is unavailable or when not run from a source checkout.
    Both containers and the shared io folder are torn down afterwards, even
    if setup partially failed.
    """
    if find_executable("docker") is None:
        pytest.skip('docker is required')
    if not op.exists(WHEEL_CACHE_FOLDER):
        os.makedirs(WHEEL_CACHE_FOLDER)
    src_folder = find_src_folder()
    if src_folder is None:
        pytest.skip('Can only be run from the source folder')
    # Shared scratch folder, mounted as /io inside both containers.
    io_folder = tempfile.mkdtemp(prefix='tmp_auditwheel_test_manylinux_',
                                 dir=src_folder)
    manylinux_id, python_id = None, None
    try:
        # Launch a docker container with volumes and pre-configured Python
        # environment. The container main program will just sleep. Commands
        # can be executed in that environment using the 'docker exec'.
        # This container will be used to build and repair manylinux compatible
        # wheels
        manylinux_id = docker_start(
            MANYLINUX_IMAGE_ID,
            volumes={'/io': io_folder, '/auditwheel_src': src_folder},
            env_variables={'PATH': PATH})
        # Install the development version of auditwheel from source:
        docker_exec(manylinux_id, 'pip install -U pip setuptools')
        docker_exec(manylinux_id, 'pip install -U /auditwheel_src')
        # Launch a docker container with a more recent userland to check that
        # the generated wheel can install and run correctly.
        python_id = docker_start(
            PYTHON_IMAGE_ID,
            volumes={'/io': io_folder, '/auditwheel_src': src_folder})
        docker_exec(python_id, 'pip install -U pip')
        yield manylinux_id, python_id, io_folder
    finally:
        # Best-effort teardown: failed removals only warn so one stuck
        # container does not mask the test result.
        for container_id in [manylinux_id, python_id]:
            if container_id is None:
                continue
            try:
                check_call(['docker', 'rm', '-f', container_id])
            except CalledProcessError:
                warnings.warn('failed to terminate and delete container %s'
                              % container_id)
        shutil.rmtree(io_folder)
def test_build_repair_numpy(docker_container):
    """End-to-end: build numpy as a plain linux wheel, repair it to
    manylinux1 with auditwheel, then install and exercise it (including
    f2py against a second gfortran runtime) on a newer-userland image."""
    # Integration test repair numpy built from scratch
    # First build numpy from source as a naive linux wheel that is tied
    # to system libraries (atlas, libgfortran...)
    manylinux_id, python_id, io_folder = docker_container
    docker_exec(manylinux_id, 'yum install -y atlas atlas-devel')
    if op.exists(op.join(WHEEL_CACHE_FOLDER, ORIGINAL_NUMPY_WHEEL)):
        # If numpy has already been built and put in cache, let's reuse this.
        shutil.copy2(op.join(WHEEL_CACHE_FOLDER, ORIGINAL_NUMPY_WHEEL),
                     op.join(io_folder, ORIGINAL_NUMPY_WHEEL))
    else:
        # otherwise build the original linux_x86_64 numpy wheel from source
        # and put the result in the cache folder to speed-up future build.
        # This part of the build is independent of the auditwheel code-base
        # so it's safe to put it in cache.
        docker_exec(manylinux_id,
                    'pip wheel -w /io --no-binary=:all: numpy==1.11.0')
        shutil.copy2(op.join(io_folder, ORIGINAL_NUMPY_WHEEL),
                     op.join(WHEEL_CACHE_FOLDER, ORIGINAL_NUMPY_WHEEL))
    filenames = os.listdir(io_folder)
    assert filenames == [ORIGINAL_NUMPY_WHEEL]
    orig_wheel = filenames[0]
    assert 'manylinux' not in orig_wheel
    # Repair the wheel using the manylinux1 container
    docker_exec(manylinux_id, 'auditwheel repair -w /io /io/' + orig_wheel)
    filenames = os.listdir(io_folder)
    assert len(filenames) == 2
    repaired_wheels = [fn for fn in filenames if 'manylinux1' in fn]
    assert repaired_wheels == ['numpy-1.11.0-cp35-cp35m-manylinux1_x86_64.whl']
    repaired_wheel = repaired_wheels[0]
    output = docker_exec(manylinux_id, 'auditwheel show /io/' + repaired_wheel)
    assert (
        'numpy-1.11.0-cp35-cp35m-manylinux1_x86_64.whl is consistent'
        ' with the following platform tag: "manylinux1_x86_64"'
    ) in output.replace('\n', ' ')
    # Check that the repaired numpy wheel can be installed and executed
    # on a modern linux image.
    docker_exec(python_id, 'pip install /io/' + repaired_wheel)
    output = docker_exec(
        python_id, 'python /auditwheel_src/tests/quick_check_numpy.py').strip()
    assert output.strip() == 'ok'
    # Check that numpy f2py works with a more recent version of gfortran
    docker_exec(python_id, 'apt-get update -yqq')
    docker_exec(python_id, 'apt-get install -y gfortran')
    docker_exec(python_id, 'python -m numpy.f2py'
                           ' -c /auditwheel_src/tests/foo.f90 -m foo')
    # Check that the 2 fortran runtimes are well isolated and can be loaded
    # at once in the same Python program:
    docker_exec(python_id, ["python", "-c", "'import numpy; import foo'"])
def test_build_wheel_with_binary_executable(docker_container):
    """Repair a wheel whose payload includes a compiled binary executable
    (not a Python extension) and verify it still imports and runs."""
    # Test building a wheel that contains a binary executable (e.g., a program)
    manylinux_id, python_id, io_folder = docker_container
    docker_exec(manylinux_id, 'yum install -y gsl-devel')
    docker_exec(manylinux_id, ['bash', '-c', 'cd /auditwheel_src/tests/testpackage && python setup.py bdist_wheel -d /io'])
    filenames = os.listdir(io_folder)
    assert filenames == ['testpackage-0.0.1-py3-none-any.whl']
    orig_wheel = filenames[0]
    assert 'manylinux' not in orig_wheel
    # Repair the wheel using the manylinux1 container
    docker_exec(manylinux_id, 'auditwheel repair -w /io /io/' + orig_wheel)
    filenames = os.listdir(io_folder)
    assert len(filenames) == 2
    repaired_wheels = [fn for fn in filenames if 'manylinux1' in fn]
    assert repaired_wheels == ['testpackage-0.0.1-py3-none-manylinux1_x86_64.whl']
    repaired_wheel = repaired_wheels[0]
    output = docker_exec(manylinux_id, 'auditwheel show /io/' + repaired_wheel)
    assert (
        'testpackage-0.0.1-py3-none-manylinux1_x86_64.whl is consistent'
        ' with the following platform tag: "manylinux1_x86_64"'
    ) in output.replace('\n', ' ')
    # Check that the repaired wheel can be installed and executed
    # on a modern linux image.
    docker_exec(python_id, 'pip install /io/' + repaired_wheel)
    output = docker_exec(
        python_id, ['python', '-c', 'from testpackage import runit; print(runit(1.5))']).strip()
    assert output.strip() == '2.25'
def test_build_repair_pure_wheel(docker_container):
    """A pure-Python wheel (six) must pass through ``auditwheel repair``
    untouched: no new wheel is produced and ``show`` reports no external
    shared-library requirements."""
    manylinux_id, python_id, io_folder = docker_container
    if op.exists(op.join(WHEEL_CACHE_FOLDER, ORIGINAL_SIX_WHEEL)):
        # If six has already been built and put in cache, let's reuse this.
        shutil.copy2(op.join(WHEEL_CACHE_FOLDER, ORIGINAL_SIX_WHEEL),
                     op.join(io_folder, ORIGINAL_SIX_WHEEL))
    else:
        docker_exec(manylinux_id,
                    'pip wheel -w /io --no-binary=:all: six==1.11.0')
        shutil.copy2(op.join(io_folder, ORIGINAL_SIX_WHEEL),
                     op.join(WHEEL_CACHE_FOLDER, ORIGINAL_SIX_WHEEL))
    filenames = os.listdir(io_folder)
    assert filenames == [ORIGINAL_SIX_WHEEL]
    orig_wheel = filenames[0]
    assert 'manylinux' not in orig_wheel
    # Repair the wheel using the manylinux1 container
    docker_exec(manylinux_id, 'auditwheel repair -w /io /io/' + orig_wheel)
    filenames = os.listdir(io_folder)
    assert len(filenames) == 1  # no new wheels
    assert filenames == [ORIGINAL_SIX_WHEEL]
    output = docker_exec(manylinux_id, 'auditwheel show /io/' + filenames[0])
    assert ''.join([
        ORIGINAL_SIX_WHEEL,
        ' is consistent with the following platform tag: ',
        '"manylinux1_x86_64". ',
        'The wheel references no external versioned symbols from system- ',
        'provided shared libraries. ',
        'The wheel requires no external shared libraries! :)',
    ]) in output.replace('\n', ' ')
| StarcoderdataPython |
9638437 | <reponame>webbjj/clustertools
""" Generic recipes for making key calculations
"""
__author__ = "<NAME>"
__all__ = [
'nbinmaker',
"binmaker",
'roaming_nbinmaker',
"roaming_binmaker",
"power_law_distribution_function",
"dx_function",
"tapered_dx_function",
"x_hist",
"mean_prof",
"smooth",
"interpolate",
"minimum_distance",
"distance",
]
import numpy as np
import numba
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from ..util.plots import _plot,_lplot
def nbinmaker(x, nbin=10, nsum=False):
    """Partition an array into ``nbin`` bins holding (roughly) equal
    numbers of elements.

    Bins whose lower and upper edges coincide (possible with repeated
    values) are dropped, so fewer than ``nbin`` bins may be returned.

    Parameters
    ----------
    x : array_like
        values to bin
    nbin : int
        number of bins (default: 10)
    nsum : bool
        also return the sum of the values in each bin (default: False)

    Returns
    -------
    x_lower, x_mid, x_upper : ndarray
        lower edge, mean value, and upper edge of each bin
    x_hist : ndarray
        number of points per bin
    x_sum : ndarray, only if ``nsum``
        sum of the point values per bin

    History
    -------
    2018 - Written - Webb (UofT)
    """
    x = np.asarray(x)
    sorted_vals = x[np.argsort(x)]
    # Lower edges sit at equal-count intervals of the sorted data.
    edges = [sorted_vals[int(float(k) * float(len(x)) / float(nbin))]
             for k in range(nbin)]
    x_lower = np.array(edges)
    # Each bin's upper edge is the next bin's lower edge; the last bin
    # is closed at the data maximum.
    x_upper = np.append(x_lower[1:], np.amax(x))
    keep = x_lower != x_upper
    x_lower = x_lower[keep]
    x_upper = x_upper[keep]
    nkept = np.sum(keep)
    x_hist = np.array([])
    x_sum = np.array([])
    x_mid = np.array([])
    for k in range(nkept):
        # All bins are half-open [lower, upper) except the last, which
        # also captures the maximum value.
        if k < nkept - 1:
            members = (x >= x_lower[k]) * (x < x_upper[k])
        else:
            members = (x >= x_lower[k])
        x_hist = np.append(x_hist, np.sum(members))
        x_sum = np.append(x_sum, np.sum(x[members]))
        x_mid = np.append(x_mid, x_sum[k] / x_hist[k])
    if nsum:
        return x_lower, x_mid, x_upper, x_hist, x_sum
    return x_lower, x_mid, x_upper, x_hist
def binmaker(x, nbin=10, nsum=False, steptype="linear"):
    """Partition an array into ``nbin`` bins of equal width.

    Parameters
    ----------
    x : array_like
        values to bin
    nbin : int
        number of bins (default: 10)
    nsum : bool
        also return the sum of the values in each bin (default: False)
    steptype : str
        "linear" for equal-width bins; anything else gives
        logarithmically spaced edges (default: "linear")

    Returns
    -------
    x_lower, x_mid, x_upper : ndarray
        lower edge, centre, and upper edge of each bin
    x_hist : ndarray
        number of points per bin
    x_sum : ndarray, only if ``nsum``
        sum of the point values per bin

    History
    -------
    2018 - Written - Webb (UofT)
    """
    x = np.array(x)
    # nbin+1 edges spanning the full data range.
    if steptype == "linear":
        edges = np.linspace(np.amin(x), np.amax(x), nbin + 1)
    else:
        edges = np.logspace(np.log10(np.amin(x)), np.log10(np.amax(x)), nbin + 1)
    x_lower = edges[:-1]
    x_upper = edges[1:]
    x_mid = (x_upper + x_lower) / 2.0
    x_hist = np.zeros(nbin)
    x_sum = np.zeros(nbin)
    for k in range(nbin):
        # Bins are half-open [lower, upper) except the last, which is
        # closed so the data maximum is counted.
        if k < nbin - 1:
            members = (x >= x_lower[k]) * (x < x_upper[k])
        else:
            members = (x >= x_lower[k]) * (x <= x_upper[k])
        x_hist[k] = len(x[members])
        x_sum[k] = np.sum(x[members])
    if nsum:
        return x_lower, x_mid, x_upper, x_hist, x_sum
    return x_lower, x_mid, x_upper, x_hist
def roaming_nbinmaker(x, nbin=10, ntot=20, nsum=False):
    """Split an array into ``ntot`` overlapping bins, each containing the
    same fraction (1/``nbin``) of the data (a "roaming" equal-number bin).

    Bins whose lower and upper edges coincide are dropped, so fewer than
    ``ntot`` bins may be returned.

    Parameters
    ----------
    x : float
        input array
    nbin : int
        number of bins to set bin fraction
    ntot : int
        number of total bins
    nsum : bool
        return sum of point values in each bin as well (default: False)

    Returns
    -------
    x_lower : float
        lower bin values
    x_mid : float
        mean value in each bin
    x_upper : float
        upper bin values
    x_hist :
        number of points in bin
    if nsum==True:
        x_sum : float
            sum of point values in each bin

    History
    -------
    2018 - Written - Webb (UofT)
    """
    xs = np.sort(x)
    # Each bin spans a fraction dx of the data; ntot lower fractions are
    # spread evenly over [0, 1-dx] so consecutive bins overlap.
    dx = 1.0 / float(nbin)
    xfrac_lower = np.linspace(0., 1 - dx, ntot)
    xfrac_upper = xfrac_lower + dx
    # Convert data fractions to values via the sorted array.
    x_lower = xs[(xfrac_lower * (len(x) - 1)).astype(int)]
    x_upper = xs[(xfrac_upper * (len(x) - 1)).astype(int)]
    x_hist = np.array([])
    x_sum = np.array([])
    x_mid = np.array([])
    # Discard degenerate bins (possible with repeated values).
    indx = x_lower != x_upper
    x_lower = x_lower[indx]
    x_upper = x_upper[indx]
    for i in range(0, np.sum(indx)):
        # Bins are half-open [lower, upper) except the last, which also
        # captures the maximum value.
        if i < np.sum(indx) - 1:
            xindx = (x >= x_lower[i]) * (x < x_upper[i])
        else:
            xindx = (x >= x_lower[i])
        x_hist = np.append(x_hist, np.sum(xindx))
        x_sum = np.append(x_sum, np.sum(x[xindx]))
        # Mean of the members, computed from the running sums.
        x_mid = np.append(x_mid, x_sum[i] / x_hist[i])
    if nsum:
        return x_lower, x_mid, x_upper, x_hist, x_sum
    else:
        return x_lower, x_mid, x_upper, x_hist
def roaming_binmaker(x, nbin=10, ntot=20, nsum=False, steptype="linear"):
    """Split an array into ``ntot`` overlapping bins of equal width
    (1/``nbin`` of the data range), giving a roaming average.

    Parameters
    ----------
    x : float
        input array
    nbin : int
        number of bins to set bin width
    ntot : int
        number of total bins
    nsum : bool
        return sum of point values in each bin as well (default: False)
    steptype : str
        linear or logarithmic steps (default: linear)

    Returns
    -------
    x_lower : float
        lower bin values
    x_mid : float
        mid-point of each bin
    x_upper : float
        upper bin values
    x_hist :
        number of points in bin
    if nsum==True:
        x_sum : float
            sum of point values in each bin

    History
    -------
    2018 - Written - Webb (UofT)
    """
    if steptype == 'linear':
        xmin = np.amin(x)
        xmax = np.amax(x)
        # Each window is 1/nbin of the range; ntot lower edges are spread
        # evenly so consecutive windows overlap.
        dx = np.fabs(xmax - xmin) / nbin
        x_lower = np.linspace(xmin, xmax - dx, ntot)
        x_upper = x_lower + dx
        x_mid = (x_upper + x_lower) / 2.
    else:
        # Same construction, but in log10 space.
        xmin = np.amin(np.log10(x))
        xmax = np.amax(np.log10(x))
        dx = np.fabs(xmax - xmin) / nbin
        x_lower = np.logspace(xmin, xmax - dx, ntot)
        x_upper = np.logspace(xmin + dx, xmax, ntot)
        x_mid = 10.0 ** ((np.log10(x_upper) + np.log10(x_lower)) / 2.)
    x_hist = np.array([])
    x_sum = np.array([])
    for j in range(0, len(x_lower)):
        # Bins are half-open [lower, upper) except the last, which is
        # closed so the maximum value is counted.
        if j < len(x_lower) - 1:
            indx = (x >= x_lower[j]) * (x < x_upper[j])
        else:
            indx = (x >= x_lower[j]) * (x <= x_upper[j])
        x_hist = np.append(x_hist, np.sum(indx))
        x_sum = np.append(x_sum, np.sum(x[indx]))
    if nsum:
        return x_lower, x_mid, x_upper, x_hist, x_sum
    else:
        return x_lower, x_mid, x_upper, x_hist
def power_law_distribution_function(n, alpha, xmin, xmax):
    """Generate points from a power-law distribution function

    Parameters
    ----------
    n : int
        number of points
    alpha : float
        power-law slope of distribution function
    xmin,xmax : float
        minimum and maximum values of distribution

    Returns
    -------
    x : float
        array of values drawn from distribution

    History
    -------
    2019 - Written - Webb (UofT)
    """
    eta = alpha + 1.0
    if xmin == xmax:
        # NOTE(review): returns a 0-d value wrapped by np.array below, not
        # an array of n identical points -- confirm this is intended.
        x = xmin
    elif alpha == 0:
        # Flat distribution: uniform draw over [xmin, xmax).
        x = xmin + np.random.random(n) * (xmax - xmin)
    elif alpha > 0:
        # NOTE(review): np.random.power samples p(y) ~ y^(eta-1) on [0, 1],
        # so after the affine rescale this is a power law in (x - xmin),
        # not in x itself -- verify against the intended distribution.
        x = xmin + np.random.power(eta, n) * (xmax - xmin)
    elif alpha < 0 and alpha != -1.0:
        # Inverse-CDF sampling of p(x) ~ x^alpha between xmin and xmax.
        x = (xmin ** eta + (xmax ** eta - xmin ** eta) * np.random.rand(n)) ** (
            1.0 / eta
        )
    elif alpha == -1:
        # alpha = -1 (log-flat): uniform in log10(x).
        x = np.log10(xmin) + np.random.random(n) * (np.log10(xmax) - np.log10(xmin))
        x = 10.0 ** x
    return np.array(x)
def dx_function(x, nx=10, bintype="num", x_lower=None, x_mean=None, x_upper=None, plot=False, **kwargs):
    """Find distribution function dN/dx using nx bins and fit a power law
    to it in log-log space.

    Parameters
    ----------
    x : float
        input array
    nx : int
        number of bins (default : 10)
    bintype : str
        bin with equal number of stars per bin (num) or evenly in x (fix) (default: num)
    x_lower,x_mean,x_upper : float
        preset lower limit, mean value, and upper limit bins
    plot : bool
        plot the distribution function and the fit (default: False)

    Returns
    -------
    x_mean : float
        mean value in each bin (bins with dx == 0 removed)
    x_hist : float
        number of stars in each bin
    dx : float
        number of stars in each bin divided by width of bin
    alpha : float
        power law slope fit to dx vs x_mean
    ealpha : float
        error in alpha
    yalpha : float
        y-intercept of fit to log(dx) vs log(x_mean)
    eyalpha : float
        error in yalpha

    History
    -------
    2018 - Written - Webb (UofT)
    """
    if x_lower is None:
        if bintype == "num":
            x_lower, x_mean, x_upper, x_hist = nbinmaker(x, nx)
        else:
            x_lower, x_mean, x_upper, x_hist = binmaker(x, nx)
    else:
        # Preset bins: just count members of each half-open bin.
        x_hist = np.array([])
        for i in range(0, len(x_lower)):
            indx = (x >= x_lower[i]) * (x < x_upper[i])
            x_hist = np.append(x_hist, np.sum(indx))
    dx = x_hist / (x_upper - x_lower)
    # Empty bins would break the log fit below, so drop them.
    indx = dx > 0
    lx_mean = np.log10(x_mean[indx])
    ldx = np.log10(dx[indx])
    # Linear fit in log-log space; covariance gives parameter errors.
    (alpha, yalpha), V = np.polyfit(lx_mean, ldx, 1, cov=True)
    ealpha = np.sqrt(V[0][0])
    eyalpha = np.sqrt(V[1][1])
    if plot:
        filename = kwargs.get("filename", None)
        _plot(x_mean[indx], np.log10(dx[indx]), xlabel="x", ylabel="LOG(dN/dx)", **kwargs)
        xfit = np.linspace(np.min(x_mean), np.max(x_mean), nx)
        dxfit = 10.0 ** (alpha * np.log10(xfit) + yalpha)
        # overplot is forced True for the fit line, so drop any caller value.
        kwargs.pop("overplot", None)
        _lplot(
            xfit, np.log10(dxfit), overplot=True, label=(r"$\alpha$ = %f" % alpha), **kwargs
        )
        plt.legend()
        if filename != None:
            plt.savefig(filename)
    return x_mean[indx], x_hist[indx], dx[indx], alpha, ealpha, yalpha, eyalpha
def tapered_dx_function(x, nx=10, bintype="num", x_lower=None, x_mean=None, x_upper=None, plot=False, **kwargs):
    """Find distribution function dN/dx using nx bins and fit a tapered
    power law A * x**alpha * (1 - exp(-(x/xc)**beta)) to it.

    Parameters
    ----------
    x : float
        input array
    nx : int
        number of bins (default : 10)
    bintype : str
        bin with equal number of stars per bin (num) or evenly in x (fix) (default: num)
    x_lower,x_mean,x_upper : float
        preset lower limit, mean value, and upper limit bins
    plot : bool
        plot the distribution function and the fit (default: False)

    Returns
    -------
    x_mean : float
        mean value in each bin (bins with dx == 0 removed)
    x_hist : float
        number of stars in each bin
    dx : float
        number of stars in each bin divided by width of bin
    A, eA : float
        amplitude of the tapered power law and its error
    alpha, ealpha : float
        power-law slope and its error
    xc, exc : float
        taper scale and its error
    beta, ebeta : float
        taper sharpness and its error

    History
    -------
    2018 - Written - Webb (UofT)
    """
    if x_lower is None:
        if bintype == "num":
            x_lower, x_mean, x_upper, x_hist = nbinmaker(x, nx)
        else:
            x_lower, x_mean, x_upper, x_hist = binmaker(x, nx)
    else:
        # Preset bins: just count members of each half-open bin.
        x_hist = np.array([])
        for i in range(0, len(x_lower)):
            indx = (x >= x_lower[i]) * (x < x_upper[i])
            x_hist = np.append(x_hist, np.sum(indx))
    dx = x_hist / (x_upper - x_lower)
    # Empty bins would break the logarithms below, so drop them.
    indx = dx > 0
    lx_mean = np.log10(x_mean[indx])
    ldx = np.log10(dx[indx])
    # Nonlinear least-squares fit in linear space; bounds keep xc inside
    # the data range and A non-negative.
    (A, alpha, xc, beta), V = curve_fit(tapered_func, 10.0 ** np.array(lx_mean), 10.0 ** np.array(ldx), bounds=([0., -1. * np.inf, np.amin(x), -1. * np.inf], [np.inf, np.inf, np.amax(x), np.inf]))
    # Parameter errors from the diagonal of the covariance matrix.
    eA = np.sqrt(V[0][0])
    ealpha = np.sqrt(V[1][1])
    exc = np.sqrt(V[2][2])
    ebeta = np.sqrt(V[3][3])
    if plot:
        filename = kwargs.get("filename", None)
        _plot(x_mean[indx], np.log10(dx[indx]), xlabel="x", ylabel="LOG(dN/dx)", **kwargs)
        xfit = np.linspace(np.min(x_mean), np.max(x_mean), nx)
        dxfit = tapered_func(xfit, A, alpha, xc, beta)
        # overplot is forced True for the fit line, so drop any caller value.
        kwargs.pop("overplot", None)
        _lplot(
            xfit, np.log10(dxfit), overplot=True, label=(r"$\alpha$ = %f" % alpha), **kwargs
        )
        plt.legend()
        if filename != None:
            plt.savefig(filename)
    return x_mean[indx], x_hist[indx], dx[indx], A, eA, alpha, ealpha, xc, exc, beta, ebeta
def tapered_func(x, A, alpha, xc, beta):
    """Tapered power law: ``A * x**alpha * (1 - exp(-(x/xc)**beta))``."""
    power_law = A * (x ** alpha)
    taper = 1.0 - np.exp(-1. * (x / xc) ** beta)
    return power_law * taper
def x_hist(x, nx=10, bintype="num", x_lower=None, x_mean=None, x_upper=None):
    """Histogram an array into nx bins (or preset bins).

    Parameters
    ----------
    x : array_like
        input array
    nx : int
        number of bins (default: 10)
    bintype : str
        "num" for equal-count bins, anything else for equal-width bins
        (default: "num"); ignored when preset bins are supplied
    x_lower, x_mean, x_upper : array_like
        preset lower edges, mean values, and upper edges of the bins

    Returns
    -------
    x_mean : ndarray
        mean value in each bin
    x_hist : ndarray
        number of points in each bin

    History
    -------
    2019 - Written - Webb (UofT)
    """
    if x_lower is None:
        # No preset bins: delegate edge construction to the bin makers.
        if bintype == "num":
            x_lower, x_mean, x_upper, x_hist = nbinmaker(x, nx)
        else:
            x_lower, x_mean, x_upper, x_hist = binmaker(x, nx)
    else:
        # Preset bins: count members of each half-open [lower, upper) bin.
        x_hist = np.array([])
        for lo, hi in zip(x_lower, x_upper):
            in_bin = (x >= lo) * (x < hi)
            x_hist = np.append(x_hist, np.sum(in_bin))
    return x_mean, x_hist
def mean_prof(x, y, nbin=10, bintype="num", steptype="linear", median=False, x_lower=None, x_mean=None, x_upper=None):
    """ Calculate mean profile of parameter y that depends on x

    Parameters
    ----------
    x,y : float
        coordinates from which to measure the mean profile
    nbin : int
        number of bins
    bintype : str
        can be bins of fixed size ('fix') or equal number of stars ('num') (default: num)
    steptype : str
        for fixed size arrays, set step size to 'linear' or 'log'
    median : bool
        find median instead of mean (Default: False)
    x_lower,x_mean,x_upper : float
        preset lower limit, mean value, and upper limit bins

    Returns
    -------
    x_bin : float
        x values of mean profile (only bins that contain data)
    y_bin : float
        y values of mean profile
    y_sig : float
        dispersion about the mean profile

    History
    -------
    2018 - Written - Webb (UofT)
    """
    if x_lower is None:
        if bintype == "num":
            x_lower, x_mid, x_upper, x_hist = nbinmaker(x, nbin)
        else:
            x_lower, x_mid, x_upper, x_hist = binmaker(x, nbin, steptype=steptype)
    else:
        # Preset bins: use the supplied means and count members per bin.
        x_mid = x_mean
        x_hist = np.array([])
        for i in range(0, len(x_lower)):
            indx = (x >= x_lower[i]) * (x < x_upper[i])
            x_hist = np.append(x_hist, np.sum(indx))
    y_bin = []
    y_sig = []
    x_bin = []
    for i in range(0, len(x_lower)):
        indx = (x >= x_lower[i]) * (x < x_upper[i])
        # `True in indx` is a membership test on the boolean mask: only
        # bins that actually contain points contribute to the profile.
        if True in indx:
            x_bin = np.append(x_bin, x_mid[i])
            if x_hist[i] > 1:
                if median:
                    y_bin = np.append(y_bin, np.median(y[indx]))
                else:
                    y_bin = np.append(y_bin, np.mean(y[indx]))
                y_sig = np.append(y_sig, np.std(y[indx]))
            elif x_hist[i] == 1:
                # A single point: its value, with zero dispersion.
                y_bin = np.append(y_bin, y[indx])
                y_sig = np.append(y_sig, 0.0)
            else:
                y_bin = np.append(y_bin, 0.0)
                y_sig = np.append(y_sig, 0.0)
    return np.array(x_bin), np.array(y_bin), np.array(y_sig)
def smooth(x, y, dx, bintype="num", median=False):
    """Smooth a profile by averaging y in bins of width dx in x.

    Parameters
    ----------
    x,y : float
        coordinates from which to measure the mean profile
    dx : float
        width of x smoothening bin
    bintype : str
        can be bins of fixed size ('fix') or equal number of stars ('num') (default: num)
    median : bool
        find median instead of mean (Default: False)

    Returns
    -------
    x_bin : float
        x values of mean profile (only bins that contain data)
    y_bin : float
        y values of mean profile
    y_sig : float
        dispersion about the mean profile
    y_min : float
        minimum y value in each bin
    y_max : float
        maximum y value in each bin

    History
    -------
    2018 - Written - Webb (UofT)
    """
    x = np.array(x)
    y = np.array(y)
    # Smooth by intervals in dx
    nbin = int((np.max(x) - np.min(x)) / dx)
    if bintype == "num":
        x_lower, x_mid, x_upper, x_hist = nbinmaker(x, nbin)
    else:
        x_lower, x_mid, x_upper, x_hist = binmaker(x, nbin)
    y_bin = []
    y_sig = []
    x_bin = []
    y_max = []
    y_min = []
    for i in range(0, nbin):
        indx = (x >= x_lower[i]) * (x < x_upper[i])
        # `True in indx` is a membership test on the boolean mask: only
        # bins that actually contain points contribute to the profile.
        if True in indx:
            x_bin = np.append(x_bin, x_mid[i])
            if x_hist[i] > 1:
                if median:
                    y_bin = np.append(y_bin, np.median(y[indx]))
                else:
                    y_bin = np.append(y_bin, np.mean(y[indx]))
                y_sig = np.append(y_sig, np.std(y[indx]))
                y_min = np.append(y_min, np.min(y[indx]))
                y_max = np.append(y_max, np.max(y[indx]))
            elif x_hist[i] == 1:
                # A single point: its value, with zero dispersion.
                y_bin = np.append(y_bin, y[indx])
                y_sig = np.append(y_sig, 0.0)
                y_min = np.append(y_min, y[indx])
                y_max = np.append(y_max, y[indx])
            else:
                y_bin = np.append(y_bin, 0.0)
                y_sig = np.append(y_sig, 0.0)
                y_min = np.append(y_min, 0.0)
                y_max = np.append(y_max, 0.0)
    return x_bin, y_bin, y_sig, y_min, y_max
def interpolate(r1, r2, x=None, y=None):
    """Perform simple linear interpolation between two points in 2D.

    Exactly one of ``x`` or ``y`` should be given; ``x`` takes
    precedence when both are supplied.

    Parameters
    ----------
    r1,r2 : tuple of float
        (x, y) coordinates from which to interpolate
    x : float
        x-value from which to interpolate y (default: None)
    y : float
        y-value from which to interpolate x (default: None)

    Returns
    -------
    val : float
        interpolated value (0 if neither x nor y is given)

    History
    -------
    2019 - Written - Webb (UofT)
    """
    x1, y1 = r1
    x2, y2 = r2
    # Slope and intercept of the line through r1 and r2.
    m = (y2 - y1) / (x2 - x1)
    b = y1 - m * x1

    # Use identity checks so legitimate falsy coordinates (e.g. 0) are
    # still treated as given (PEP 8: comparisons to None use `is`).
    if x is not None:
        val = m * x + b
    elif y is not None:
        val = (y - b) / m
    else:
        print("NO INTERMEDIATE COORDINATE GIVEN")
        val = 0
    return val
@numba.njit
def minimum_distance(x):
    """Find distance to each point's nearest neighbour

    Brute-force O(n^2) pairwise scan; each pair is computed once and
    used to update both points' running minima.

    Parameters
    ----------
    x : float (array)
        3D position of each point of the form [x,y,z].Transpose

    Returns
    -------
    min_distance : float
        distance to each points nearest neighbour

    History
    -------
    2019 - Written - Webb (UofT)
    """
    # -1.0 marks "no neighbour seen yet" for each point.
    min_distance = [-1.0] * len(x)
    for i in range(len(x) - 1):
        for j in range(i + 1, len(x)):
            r = distance(x[i], x[j])
            if min_distance[i] < 0:
                min_distance[i] = r
            else:
                min_distance[i] = np.minimum(min_distance[i], r)
            if min_distance[j] < 0:
                min_distance[j] = r
            else:
                min_distance[j] = np.minimum(min_distance[j], r)
    return min_distance
@numba.njit
def distance(x1, x2):
    """Euclidean distance between two 3D points (numba-compiled).

    Parameters
    ----------
    x1 : float
        3D position of first point of the form [x,y,z]
    x2 : float
        3D position of second point of the form [x,y,z]

    Returns
    -------
    distance : float
        distance between the two points

    History
    -------
    2019 - Written - Webb (UofT)
    """
    dx0 = x2[0] - x1[0]
    dx1 = x2[1] - x1[1]
    dx2 = x2[2] - x1[2]
    return (dx0 ** 2 + dx1 ** 2 + dx2 ** 2) ** 0.5
| StarcoderdataPython |
301437 | <reponame>Mario-Kart-Felix/poemexe
#!/usr/bin/env python3
import argparse
import codecs
import json
import os
import re
import sys
from glob import glob
OUTPUT_FILENAME = '../model/verses.json'
RE_CONTAINS_URL = re.compile('https?://')
RE_WEAK_LINE = re.compile(' (and|by|from|is|of|that|the|with)\\n',
flags=re.IGNORECASE)
REPLACE = (
# convert smart quotes
(re.compile(u'\u2018|\u2019'), "'"),
(re.compile(u'\u201C|\u201D'), '"'),
# remove double quotes
(re.compile('"'), ''),
# em dash
(re.compile(u'--|\u2013|\\B-|-\\B|~'), u'\u2014'),
(re.compile(u'\u2014'), u' \u2014'),
# ellipsis
(re.compile(r'\.( \.|\.)+'), u'\u2026'),
(re.compile(u'\\b \u2026'), u'\u2026'),
)
output_haiku = []
output_texts = []
def parse_credit(line):
    """Return the credit text from a ``// ...`` comment line, else None."""
    trimmed = line.strip()
    if trimmed.startswith('//'):
        return trimmed[2:].lstrip()
    return None
def convert_dirs(dirs, export=False):
    """Read every ``*.txt`` file in *dirs*, validate and normalize the
    verses inside, and accumulate them into the module-level
    ``output_haiku`` / ``output_texts`` lists.

    Each file may start with a ``// credit`` line; verses are separated by
    blank lines.  When *export* is True the collected verses are written
    to OUTPUT_FILENAME as JSON.  Returns ``output_haiku``.

    Raises ValueError when a verse contains a URL or ends a line on a
    weak word (per RE_WEAK_LINE).
    """
    for dirname in dirs:
        for fn in glob(os.path.join(dirname, '*.txt')):
            print('Reading {}'.format(fn))
            input_lines = None
            with codecs.open(fn, encoding='utf-8') as fp:
                input_lines = fp.readlines()
            # find a credit line for the file, if one exists
            file_credit = parse_credit(input_lines[0])
            if file_credit:
                input_lines.pop(0)
            # strip trailing whitespace and comments
            input_lines = (line.split('#', 1)[0].strip()
                           for line in input_lines
                           if not line.lstrip().startswith('#'))
            # select all unique verses,
            # where each verse is separated by an empty line
            unique_haiku = set()
            for haiku in '\n'.join(input_lines).split('\n\n'):
                haiku = haiku.strip().lower()
                if RE_CONTAINS_URL.search(haiku):
                    raise ValueError('haiku includes a url: {}'
                                     .format(haiku))
                elif RE_WEAK_LINE.search(haiku):
                    raise ValueError('haiku contains a weak line: {}'
                                     .format(haiku))
                # Apply the normalization substitutions (quotes, dashes,
                # ellipses) defined in REPLACE.
                for regex, repl in REPLACE:
                    haiku = regex.sub(repl, haiku)
                if haiku:
                    unique_haiku.add(haiku)
            print(' %d unique haiku' % len(unique_haiku))
            # separate the lines into first, middle, and last buckets
            for haiku in unique_haiku:
                haiku_lines = haiku.split('\n')
                credit = file_credit
                line_count = len(haiku_lines)
                output_lines = []
                buckets = [[], [], []]
                for i, line in enumerate(haiku_lines):
                    # bucket 0 = first line, 2 = last line, 1 = middle.
                    bucket = 0 if i == 0 else (2 if i == line_count - 1 else 1)
                    tokens = line.split()
                    if not tokens:
                        continue
                    line = ' '.join(tokens)
                    output_lines.append(line)
                    buckets[bucket].append(line)
                if output_lines:
                    obj = {
                        'a': tuple(buckets[0]),
                        'b': tuple(buckets[1]),
                        'c': tuple(buckets[2]),
                    }
                    if credit:
                        obj['source'] = credit
                    # Accumulated at module level so repeated calls and the
                    # export step see all converted verses.
                    output_haiku.append(obj)
                    output_texts.append(' / '.join(output_lines))
    if export:
        print('Writing {}'.format(OUTPUT_FILENAME))
        with codecs.open(OUTPUT_FILENAME, 'w', encoding='utf-8') as fp:
            json.dump(output_haiku, fp,
                      indent=2, sort_keys=True, ensure_ascii=False)
        maxlen = max(len(text) for text in output_texts)
        print('Wrote {} poems with maximum length {}'
              .format(len(output_haiku), maxlen))
    return output_haiku
def main():
    """Command-line entry point: convert and export each given directory.

    Returns the process exit status: 1 when no source directory was
    supplied (after printing usage), otherwise 0 on success.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('srcdir', nargs='*')
    args = parser.parse_args()
    if not args.srcdir:
        parser.print_help()
        return 1
    convert_dirs(args.srcdir, export=True)
    # Explicit success status (previously an implicit None, which
    # sys.exit() also treats as 0 -- this just makes the intent clear).
    return 0


if __name__ == '__main__':
    sys.exit(main())
| StarcoderdataPython |
292578 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-03-10 12:27
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (1.11.3, 2018-03-10).

    Adds ``pv``/``uv`` counter fields to Post and re-declares existing
    fields on Category/Post/Tag to attach localized ``verbose_name``
    labels and explicit ``on_delete`` behavior.
    """

    dependencies = [
        ('blog', '0002_auto_20180310_1024'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='pv',
            field=models.PositiveIntegerField(default=0, verbose_name='pv'),
        ),
        migrations.AddField(
            model_name='post',
            name='uv',
            field=models.PositiveIntegerField(default=0, verbose_name='uv'),
        ),
        migrations.AlterField(
            model_name='category',
            name='created_time',
            field=models.DateTimeField(auto_now_add=True, verbose_name='创建时间'),
        ),
        migrations.AlterField(
            model_name='category',
            name='is_nav',
            field=models.BooleanField(default=False, verbose_name='是否为导航'),
        ),
        migrations.AlterField(
            model_name='category',
            name='name',
            field=models.CharField(max_length=128, verbose_name='名字'),
        ),
        migrations.AlterField(
            model_name='category',
            name='status',
            field=models.PositiveIntegerField(choices=[(1, '正常'), (2, '删除')], default=1, verbose_name='状态'),
        ),
        migrations.AlterField(
            model_name='category',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='作者'),
        ),
        migrations.AlterField(
            model_name='post',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category', verbose_name='分类'),
        ),
        migrations.AlterField(
            model_name='post',
            name='content',
            field=models.TextField(help_text='正文必须是MarkDown', verbose_name='正文'),
        ),
        migrations.AlterField(
            model_name='post',
            name='created_time',
            field=models.DateTimeField(auto_now_add=True, verbose_name='创建时间'),
        ),
        migrations.AlterField(
            model_name='post',
            name='desc',
            field=models.CharField(blank=True, max_length=1024, verbose_name='摘要'),
        ),
        migrations.AlterField(
            model_name='post',
            name='is_markdown',
            field=models.BooleanField(default=True, verbose_name='使用markdown'),
        ),
        migrations.AlterField(
            model_name='post',
            name='last_update_time',
            field=models.DateTimeField(auto_now=True, verbose_name='最后修改时间'),
        ),
        migrations.AlterField(
            model_name='post',
            name='status',
            field=models.PositiveIntegerField(choices=[(1, '上线'), (2, '删除')], default=1, verbose_name='状态'),
        ),
        migrations.AlterField(
            model_name='post',
            name='tag',
            field=models.ManyToManyField(related_name='posts', to='blog.Tag', verbose_name='标签'),
        ),
        migrations.AlterField(
            model_name='post',
            name='title',
            field=models.CharField(max_length=128, verbose_name='标题'),
        ),
        migrations.AlterField(
            model_name='post',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='作者'),
        ),
        migrations.AlterField(
            model_name='tag',
            name='created_time',
            field=models.DateTimeField(auto_now_add=True, verbose_name='创建时间'),
        ),
        migrations.AlterField(
            model_name='tag',
            name='name',
            field=models.CharField(max_length=128, verbose_name='名字'),
        ),
        migrations.AlterField(
            model_name='tag',
            name='status',
            field=models.PositiveIntegerField(choices=[(1, '正常'), (2, '删除')], default=1, verbose_name='状态'),
        ),
        migrations.AlterField(
            model_name='tag',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='作者'),
        ),
    ]
| StarcoderdataPython |
1627830 | <gh_stars>10-100
# coding=utf-8
import octoprint.plugin
import octoprint.filemanager
import octoprint.filemanager.util
from octoprint.util.comm import strip_comment
class CommentStripper(octoprint.filemanager.util.LineProcessorStream):
    """Line processor that removes GCODE comments and drops blank lines."""

    def process_line(self, line):
        # strip_comment removes everything after the comment marker;
        # a line that is empty afterwards is filtered out entirely.
        stripped = strip_comment(line).strip()
        if stripped:
            return stripped + "\r\n"
        return None
def strip_all_comments(path, file_object, links=None, printer_profile=None, allow_overwrite=True, *args, **kwargs):
    """File-preprocessor hook: wrap GCODE uploads whose base name ends in
    ``_strip`` in a CommentStripper stream; pass everything else through
    unchanged."""
    if not octoprint.filemanager.valid_file_type(path, type="gcode"):
        return file_object

    import os
    base_name = os.path.splitext(file_object.filename)[0]
    if not base_name.endswith("_strip"):
        return file_object

    stripped_stream = CommentStripper(file_object.stream())
    return octoprint.filemanager.util.StreamWrapper(file_object.filename, stripped_stream)
# OctoPrint plugin metadata: name and description shown in the plugin manager.
__plugin_name__ = "Strip comments from GCODE"
__plugin_description__ = "Strips all comments and empty lines from uploaded/generated GCODE files ending on the name " \
                         "postfix \"_strip\", e.g. \"some_file_strip.gcode\"."
# Declares compatibility with both Python 2.7 and Python 3.
__plugin_pythoncompat__ = ">=2.7,<4"
# Register strip_all_comments as a file-manager preprocessor so uploads are
# filtered through it before being stored.
__plugin_hooks__ = {
    "octoprint.filemanager.preprocessor": strip_all_comments
}
| StarcoderdataPython |
3289787 | from functools import partial
from plenum.common.messages.node_messages import ViewChangeStartMessage
from plenum.test.delayers import msg_rep_delay
from plenum.test.helper import sdk_send_random_and_check, assertExp, waitForViewChange
from plenum.test import waits
from plenum.test.node_catchup.helper import waitNodeDataEquality
from plenum.test.node_request.helper import sdk_ensure_pool_functional
from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected
from plenum.test.restart.helper import get_group, restart_nodes
from plenum.test.stasher import delay_rules
from plenum.test.test_node import checkNodesConnected, ensureElectionsDone
from plenum.test.view_change.helper import start_stopped_node
from stp_core.loop.eventually import eventually
def test_restart_node_with_view_changes(tdir, tconf,
                                        looper,
                                        txnPoolNodeSet,
                                        sdk_pool_handle,
                                        sdk_wallet_client,
                                        allPluginsPath):
    '''
    1. Stop the node Delta
    2. Patch methods for processing VCStartMsgStrategy messages
    3. Delay CurrentState messages on Delta
    4. Start Delta
    5. Start view change with a master degradation reason (from view 0 to 1)
    6. Check that Delta start VCStartMsgStrategy after quorum of InstanceChanges
    7. Reset delay for CurrentStates
    8. Check that propagate primary happened.
    9. Unpatch VCStartMsgStrategy methods and process caught messages.
    10. Start view change with a master degradation reason (from view 1 to 2)
    11. Check that all nodes has viewNo = 2 and can order transactions.
    '''
    # Prepare nodes
    lagging_node = txnPoolNodeSet[-1]
    rest_nodes = txnPoolNodeSet[:-1]
    start_view_no = lagging_node.viewNo
    # Stop Delta
    waitNodeDataEquality(looper, lagging_node, *rest_nodes)
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            lagging_node,
                                            stopNode=True)
    looper.removeProdable(lagging_node)
    # Send more requests to active nodes
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, len(rest_nodes) * 3)
    waitNodeDataEquality(looper, *rest_nodes)
    # Restart stopped node (start=False so routes can be patched before it runs)
    lagging_node = start_stopped_node(lagging_node,
                                      looper,
                                      tconf,
                                      tdir,
                                      allPluginsPath,
                                      start=False,
                                      )
    # Add to lagging_node node route a patched method for processing
    # ViewChangeStartMessage to delay processing.
    # NOTE: declared global so the closures below and later assertions all
    # see the same capture list.
    global view_change_started_messages
    view_change_started_messages = []

    def patch_on_view_change_started(node, msg, frm):
        # Capture instead of processing, so the real handler can be fed
        # these messages later.
        view_change_started_messages.append((node, msg, frm))

    processor = partial(patch_on_view_change_started,
                        lagging_node)
    lagging_node.nodeMsgRouter.add((ViewChangeStartMessage, processor))

    # Delay CurrentState messages on lagging_node to delay propagate primary
    with delay_rules(lagging_node.nodeIbStasher, msg_rep_delay()):
        # Add lagging_node to pool
        looper.add(lagging_node)
        txnPoolNodeSet[-1] = lagging_node
        looper.run(checkNodesConnected(txnPoolNodeSet))
        looper.run(
            eventually(lambda: assertExp(len(lagging_node.nodeIbStasher.delayeds) >= 3)))
        # Start ViewChange (0 -> 1)
        for n in rest_nodes:
            n.view_changer.on_master_degradation()
        # Lagging node still did not catchup, so it can't participate and process I_CH
        looper.run(
            eventually(
                lambda: assertExp(len(view_change_started_messages) == 0)))
    # Delay reset on exiting the with-block; lagging node catches up till old view
    looper.run(
        eventually(
            lambda: assertExp(lagging_node.viewNo == start_view_no)))
    # Unpatch ViewChangeStartMessages processing and process delayed messages
    for msg in view_change_started_messages:
        lagging_node.view_changer.node.nodeInBox.append((msg[1],
                                                         lagging_node.view_changer.node.name))
    waitForViewChange(looper,
                      txnPoolNodeSet,
                      expectedViewNo=start_view_no + 1,
                      customTimeout=waits.expectedPoolViewChangeStartedTimeout(len(txnPoolNodeSet)))
    # Start ViewChange (1 -> 2)
    for n in rest_nodes:
        n.view_changer.on_master_degradation()
    waitForViewChange(looper,
                      txnPoolNodeSet,
                      expectedViewNo=start_view_no + 2,
                      customTimeout=waits.expectedPoolViewChangeStartedTimeout(len(txnPoolNodeSet)))
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet,
                        instances_list=range(txnPoolNodeSet[0].requiredNumberOfInstances))
    # Pool must still be able to order transactions in the new view.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    waitNodeDataEquality(looper, *txnPoolNodeSet)
| StarcoderdataPython |
6486810 | <gh_stars>1-10
# -*- coding:utf-8 -*-
"""
@author:SiriYang
@file: Result.py
@time: 2019.12.25 12:43
@updateTime: 2021-02-06 20:28:06
@codeLines: 46
"""
class ResultEnum(object):
    """
    Enumeration of (code, message) result tuples.

    Positive (success-like) results use even codes; negative (failure-like)
    results use odd codes.
    """
    # Basic results
    UNKNOW_ERROR = (99999, "Unknow error")
    SUCCESS = (0, "Success")
    FAULT = (1, "Fault")
    # Translation-service errors
    TRANSLATE_ERROR = (101, "翻译出错!")
    BAIDUAPPID_VOID = (111, "百度appID为空!")
    BAIDUKEY_VOID = (113, "百度Key为空!")
    # Network errors
    URL_INVALID = (401, "这不是一个有效链接!")
    NET_TIME_OUT = (403, "连接超时!")
class Result(object):
    """Result wrapper pairing a (code, info) enum tuple with optional payload data.

    Per ResultEnum's convention, even codes are positive (success-like)
    results and odd codes are negative (failure-like) ones.
    """

    # Result code (int)
    mCode = None
    # Result information text (str)
    mInfo = None
    # Result data; falls back to the info text when no payload is given
    mData = None

    def __init__(self, resultEnum, data=None):
        """resultEnum: a (code, info) tuple; data: optional payload."""
        self.mCode = resultEnum[0]
        self.mInfo = resultEnum[1]
        # Use `is None` (identity) rather than `== None`: a falsy payload
        # such as 0 or '' must still be kept as the data.
        self.mData = resultEnum[1] if data is None else data

    def __del__(self):
        pass

    def getCode(self):
        return self.mCode

    def setCode(self, code):
        self.mCode = code

    def getInfo(self):
        return self.mInfo

    def setInfo(self, info):
        self.mInfo = info

    def getData(self):
        return self.mData

    def setData(self, data):
        self.mData = data

    def equal(self, enum):
        """Return True when this result's code matches the given enum tuple."""
        return self.mCode == enum[0]

    def isPositive(self):
        """Even codes are positive results; odd codes are negative ones."""
        return self.mCode % 2 == 0

    def toString(self):
        """Human-readable rendering of the result."""
        return "Error Code " + str(self.mCode) + " : " + self.mInfo
if __name__ == "__main__":
    # Smoke test: build a SUCCESS result and print it when it is positive.
    res = Result(ResultEnum.SUCCESS)
    if (res.isPositive()):
        print(res.toString())
| StarcoderdataPython |
9612907 | <reponame>kyleect/vom
from selenium import webdriver
from vom import ViewDriver
if __name__ == '__main__':
    # Run Chrome headless so the example works without a display.
    options = webdriver.ChromeOptions()
    options.add_argument("--headless")
    # ViewDriver (from the vom package) wraps the raw selenium driver.
    driver = ViewDriver(webdriver.Chrome(chrome_options=options))
    driver.get("http://example.com")
    page = driver.find_element_by_tag_name("div")
    # Attach child elements to the view object as attributes.
    page.header = page.find_element_by_tag_name("h1")
    page.texts = page.find_elements_by_tag_name("p")
    page.click()
    print(page.header)
| StarcoderdataPython |
# Program based on Reverse of 4 digit numbers
import math

n = int(input("Enter numbers: "))
# Extract each digit with modulo / integer (floor) division instead of
# float division + math.floor: clearer and avoids float rounding entirely.
a = n % 10            # units digit
b1 = (n // 10) % 10   # tens digit
c1 = (n // 100) % 10  # hundreds digit
d = n // 1000         # thousands digit
# Reassemble the digits in reverse order.
print(a*1000 + b1*100 + c1*10 + d)
| StarcoderdataPython |
3458076 | <filename>PyPtero/__init__.py
from . import Sync | StarcoderdataPython |
237006 | <filename>plugins/geometry/crop.py
import itk
import medipy.itk
def crop(input, index, shape):
    """ Return a sub-image from the input image, starting at index and with
        given shape.

        <gui>
            <item name="input" type="Image" label="Input"/>
            <item name="index" type="Coordinates" label="Start"/>
            <item name="shape" type="Array" initializer="type=int" label="Shape"/>
            <item name="output" type="Image" initializer="output=True" role="return"
                  label="Output"/>
        </gui>
    """
    itk_input = medipy.itk.medipy_image_to_itk_image(input, False)
    # index/shape are reversed to match ITK's axis ordering.
    itk_index = [x for x in reversed(index)]
    itk_shape = [x for x in reversed(shape)]
    # Use RegionOfInterestImageFilter since we wish to modify the output's origin
    region = itk_input.GetRequestedRegion().__class__(itk_index, itk_shape)
    # Renamed from `filter`, which shadowed the `filter` builtin.
    roi_filter = itk.RegionOfInterestImageFilter[itk_input, itk_input].New(
        Input=itk_input, RegionOfInterest=region)
    roi_filter()
    itk_output = roi_filter[0]
    output = medipy.itk.itk_image_to_medipy_image(itk_output, None, True)
    return output
| StarcoderdataPython |
9667813 | """
Author: <NAME>
Email: <EMAIL>
Description: Item/Product quantity functions
Interfacing with Open Cart API
"""
from decorators import authenticated_opencart, get_only
from utils import oc_requests, sync_info
from datetime import datetime
from frappe.utils import get_files_path, flt, cint
import frappe, json, os, traceback, base64
# Names of the custom fields on ERPNext docs that hold the corresponding
# Opencart product / category ids.
OC_PROD_ID = 'oc_product_id'
OC_CAT_ID = 'opencart_category_id'
# Update item quantity
@authenticated_opencart
def update_item_qty_handler(doc, site_doc, api_map, headers, silent=False):
    """Push the current quantity of Item *doc* to the Opencart site.

    Returns a dict ``{'success': bool, 'logs': list}``.
    NOTE(review): ``site_doc``, ``api_map`` and ``headers`` appear to be
    injected by the @authenticated_opencart decorator — confirm.
    """
    logs = []
    success = False
    qty = get_item_qty(doc)
    # Cannot find product id -> cannot sync
    if (not doc.get(OC_PROD_ID)):
        sync_info(logs, 'Product ID for Opencart is missing', stop=True, silent=silent, error=True)
    data = [{
        "product_id": doc.get(OC_PROD_ID),
        "quantity": str(cint(qty))
    }]
    # Push qty to opencart
    res = oc_requests(site_doc.get('server_base_url'), headers, api_map, 'Product Quantity', stop=False, data=data)
    if res:
        # Not successful
        if (not res.get('success')):
            sync_info(logs, 'Quantity for product %s not updated on Opencart. Error: %s' %(doc.get('name'), res.get('error')), stop=False, silent=silent, error=True)
        else:
            success = True
            sync_info(logs, 'Quantity for product %s successfully updated on Opencart'%doc.get('name'), stop=False, silent=silent)
    return {
        'success': success,
        'logs': logs
    }
@frappe.whitelist()
def update_item_qty(doc_name, silent=False):
    """Whitelisted entry point: push the qty of Item *doc_name* to Opencart."""
    item = frappe.get_doc("Item", doc_name)
    return update_item_qty_handler(item, silent=silent)
# Return the current qty of item based on item code (or Item Doc Name)
@frappe.whitelist()
def get_item_qty_by_name(doc_name):
    """Whitelisted wrapper: look up the Item doc and delegate to get_item_qty."""
    item = frappe.get_doc("Item", doc_name)
    return get_item_qty(item)
# TODO: Write test to make sure this function is correct. Note: current query all transaction.
# This can create overhead time.
def get_item_qty(item):
    """Compute the available quantity of *item* from the stock ledger,
    adjusted by confirmed-but-undelivered Sales Orders."""
    # Query stock ledger to get qty
    item_ledgers = frappe.db.sql("""select item_code, warehouse, posting_date, actual_qty, valuation_rate, \
                                    stock_uom, company, voucher_type, qty_after_transaction, stock_value_difference \
                                    from `tabStock Ledger Entry` \
                                    where docstatus < 2 and item_code = '%s' order by posting_date, posting_time, name""" \
                                 %item.get('item_code'), as_dict=1)
    # Calculate the qty based purely on stock transaction record
    bal_qty = 0
    for d in item_ledgers:
        if d.voucher_type == "Stock Reconciliation":
            # Reconciliation entries carry an absolute balance, not a delta.
            qty_diff = flt(d.qty_after_transaction) - bal_qty
        else:
            qty_diff = flt(d.actual_qty)
        bal_qty += qty_diff
    # Adjust this by Sales Order that've been confirmed but not completely delivered
    sales_order_items = frappe.db.sql("""select * from `tabSales Order Item` where item_code = '%s' \
                                        and parent in (select name from `tabSales Order` \
                                                        where docstatus < 2 and \
                                                        (per_delivered is NULL or per_delivered != 100))"""%item.get('item_code'), as_dict=1)
    for so_item in sales_order_items:
        bal_qty -= flt(so_item.get('qty'))
        # Add back whatever has already been delivered against this SO item.
        dn_items = frappe.get_list("Delivery Note Item", {'docstatus': 1, 'prevdoc_detail_docname': so_item.get('name')}, ['name', 'qty'])
        if (len(dn_items)>0):
            for dn_item in dn_items:
                bal_qty += flt(dn_item.get('qty'))
    # NOTE(review): returning the COUNT of sales order items (not bal_qty)
    # when bal_qty != 0, and the string "0" otherwise, looks suspicious —
    # confirm the intended return value against callers.
    return len(sales_order_items) if bal_qty!=0 else "0"
| StarcoderdataPython |
6593344 |
from translator_pkg.config import Config
from translator_pkg.excel_helper import ExcelHelper
from translator_pkg.excel_json_helper import ExcelJsonHelper
import sys
def close():
    """Abort the program, signalling failure with exit status 2."""
    raise SystemExit(2)
# CONSTANTS
EVENTS_LABEL_LIST = ["events"]  # special process for events section in excel file
OBSERVATIONS_LABEL = "observations"  # special process for observations
SUMMARY_LABEL = "summary"  # special process for summary
class Translator:
    """
    A class to run all the process related to the translation.
    It depends on Config, ExcelHelper, ExcelJsonHelper
    Step 1 extract data, using configuration defined in Config manipulate the excel file and create Json-like items
    Step 2 join by IDs and created paths, then data should be linked with others sheets to create corresponding path in the consolidated JSON file
    Step 3 format a cleaned, some items are list or dictionaries, in this process Json data is cleaned to be uniform
    """

    @classmethod
    def translate(cls, excel_name, outputfile):
        """Read *excel_name*, consolidate its sheets and write JSON to *outputfile*."""
        # Grouper for children, e.g. {initial_conditions: [list], management: [list]}
        child_grouper = {}
        json_parameters = Config.get_configuration()  # template to define the json structure
        my_excel_helper = ExcelHelper()
        my_excel_helper.load_file(excel_name)
        sheets_names = my_excel_helper.get_sheets_names()
        # Step 1: extract data from excel
        for data in json_parameters:
            # events process
            data_name = data["name"]
            if (data_name in EVENTS_LABEL_LIST):
                list_objects = ExcelJsonHelper.several_sheets_reader(data["eventsType"], data, my_excel_helper, "addEvents")
                child_grouper["events"] = list_objects
            # observed data process
            elif (data_name in [OBSERVATIONS_LABEL, SUMMARY_LABEL]):
                sheetsList = list(filter(lambda x: data["sheetPattern"] in x, sheets_names))
                list_objects = ExcelJsonHelper.several_sheets_reader(sheetsList, data, my_excel_helper)
                child_grouper[data_name] = list_objects
            # other sheets
            else:
                list_objects = ExcelJsonHelper.get_data_json_like(data, my_excel_helper)
                child_grouper[data_name] = list_objects
        print("Step 1 finished (loading)")
        ## step 2: expand method
        EXPAND_ORDER = Config.get_expand_order()
        for expandable_item in EXPAND_ORDER:
            local_config = next((item for item in json_parameters
                                 if item["name"] == expandable_item), None)
            config_name = local_config["name"]
            config_expand = local_config["expand"]
            for exp in config_expand:
                col = exp["col"]
                config_name_expand = exp["config_name"]
                config_bool_delete = exp.get("delete_after")
                for item_to_expand in child_grouper[config_name]:
                    currentExpansion = next((item for item in child_grouper[config_name_expand]
                                             if item[col] == item_to_expand[col]), None)
                    # we assume it exists
                    if currentExpansion:
                        item_to_expand.update(currentExpansion)
                        if config_bool_delete:
                            del item_to_expand[col]  # else:
                            # print("Warning:expand sheet:%s didn't have id:%s"%(config_name_expand,item_to_expand[col]))
                del child_grouper[config_name_expand]  # one expand applies only for one sheet
        """
        experiments: [{}],
        initial_conditions: [{}]
        """
        print("Step 2 finished (expanding)")
        ## step 3: organize data, joining based on ids that link elements; create paths when applicable
        for local_data in json_parameters:
            if len(local_data["path"]) > 1:  # has a long path, it is not a root element
                data_levels = local_data["levels"]
                data_type = local_data["type"]
                base_path = local_data["path"].pop(0)  # remove first element, root element
                for item in child_grouper[local_data["name"]]:
                    # identify level id
                    selected_level = None
                    for level in data_levels:
                        if level in item and item:  # it exists and it is not empty
                            selected_level = level
                            break  # one level is enough
                    if not selected_level:
                        print("item not link with experiments:", item)
                        print("sheet", local_data["name"])
                    # locate into the map
                    for itemParent in child_grouper[base_path]:
                        # it can be either experiment or treatments IDs
                        # some items don't have the selected level id
                        if selected_level in itemParent \
                                and itemParent[selected_level] == item[selected_level]:
                            temp_path1 = list(local_data["path"])
                            temp_path2 = list(local_data["path"])
                            ExcelJsonHelper.create_path(temp_path1, itemParent)
                            ExcelJsonHelper.recursivity_json_path(temp_path2, itemParent, item, data_type)
                if local_data["name"] in child_grouper.keys():
                    del child_grouper[local_data["name"]]  # memory clean
        print("Step 3 finished (Joining and linking)")
        # Step 4: cleanup — drop linking ids, pull selected keys up from parents.
        DELETE_THIS = ["eid", "trt_name", "wst_id", 'soil_id']
        ExcelJsonHelper.remove_keys_level(child_grouper, 3, DELETE_THIS)
        ExcelJsonHelper.get_items_from_parent(child_grouper, ["cul_name", "crid"])
        ExcelJsonHelper.remove_keys_level(child_grouper, 2, ['crid', 'cul_name'], False)
        print("Step 4 finished json methods(deleting ids, get from parent)")
        # ExcelJsonHelper.validate_dates_format(child_grouper, Config.get_dates())
        ExcelJsonHelper.write_json(child_grouper, outputfile)
1623193 | <filename>visionpy/__init__.py
from visionpy.vision import Vision
from visionpy.vision import Contract
from visionpy.async_vision import AsyncVision
from visionpy.async_contract import AsyncContract
# Currency unit constants; VS appears to be expressed in base sub-units
# (1 VS = 1_000_000 sub-units) — TODO confirm against visionpy docs.
VS = 1_000_000
vdt = 1
| StarcoderdataPython |
370350 | <filename>preprocess_VisualGenome.py
#! -*- coding: utf-8 -*-
# 使用Resnet101提取区域特征
# 注:部分区域图片有问题,需要额外处理
import os
import cv2
import json
import numpy as np
from PIL import Image
from tqdm import tqdm
import matplotlib.pyplot as plt
from bert4keras.backend import keras, K
# Image model: ResNet101 pretrained on ImageNet, no classification head,
# global average pooling so predict() yields one feature vector per image.
preprocessing_image = keras.preprocessing.image
preprocess_input = keras.applications.resnet.preprocess_input
image_model = keras.applications.resnet.ResNet101(include_top=False, weights='imagenet', pooling='avg')
def preprocess_data(files, train=True):
    """Read and organize the COCO-style region data, extracting one ResNet
    feature per region crop, and save one .npy file per annotation file:
    [
        {'region_feature', keywords': str, 'caption': str},
        {'region_feature', keywords': str, 'caption': str},
        ...
    ]
    """
    for _, file in tqdm(enumerate(files)):
        res = []
        try:
            # NOTE(review): json.load's `encoding` kwarg was removed in
            # Python 3.9 — confirm the runtime version.
            image_data = json.load(open(folder+file), encoding='utf-8')
        except UnicodeDecodeError:
            print(folder+file)
            continue
        image_id = file.replace('json', 'jpg')
        img_path = './data/VisualGenome/VG_100K/%s' % image_id
        if not os.path.exists(img_path):
            continue
        # OpenCV loads BGR; convert to RGB for the keras preprocessing.
        img = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
        for region in image_data.values():
            r = {}
            region_img = img[region['y']:region['y']+region['height'], region['x']:region['x']+region['width']]
            # Skip problematic (empty) region crops.
            if region_img.any():
                x = np.expand_dims(region_img, axis=0)
                x = preprocess_input(x)
                region_feature = image_model.predict(x)
                r['region_feature'] = region_feature.tolist()[-1]
            else:
                continue
            r['caption'] = region["phrase"]
            keywords = ''
            for ob in region['objects']:
                keywords += ob['name'] + ' '
            r['keywords'] = keywords
            res.append(r)
        if train:
            np.save('./data/VisualGenome/train2016/'+file.replace('json', 'npy'), res)
        else:
            np.save('./data/VisualGenome/valid2016/'+file.replace('json', 'npy'), res)
folder = './data/VisualGenome/annotation/regionfiles/'
files = os.listdir(folder)
# Split the dataset 80/20 into training and validation sets.
split_idx = int(len(files)*0.8)
train_data, valid_data = files[:split_idx], files[split_idx:]
preprocess_data(train_data, train=True)
preprocess_data(valid_data, train=False)
12821975 | """Utility functions for sleep/wake detection algorithms."""
from typing import Optional
import numpy as np
from typing_extensions import Literal
EPOCH_LENGTH = Literal[30, 60]
def rescore(predictions: np.ndarray, epoch_length: Optional[EPOCH_LENGTH] = 30) -> np.ndarray:
    """Apply Webster's rescoring rules to sleep/wake predictions.

    Parameters
    ----------
    predictions : array_like
        sleep/wake predictions
    epoch_length : int
        length of actigraphy epoch in seconds

    Returns
    -------
    array_like
        rescored sleep/wake predictions
    """
    rescored = predictions.copy()
    # Webster rules a-c, then d-e, applied in place on the copy.
    rescored = _apply_recording_rules_a_c(rescored, epoch_length)
    rescored = _apply_recording_rules_d_e(rescored, epoch_length)
    # Finally, a single 1-epoch sandwiched between two 0-epochs is rescored.
    for idx in range(1, len(rescored) - 1):
        if rescored[idx] == 1 and rescored[idx - 1] == rescored[idx + 1] == 0:
            rescored[idx] = 0
    return rescored
def _apply_recording_rules_a_c(rescored: np.ndarray, epoch_length: EPOCH_LENGTH): # pylint:disable=too-many-branches
wake_bin = 0
for t in range(len(rescored)): # pylint:disable=consider-using-enumerate
if rescored[t] == 1:
wake_bin += 1
else:
if epoch_length == 30:
if wake_bin >= 30:
# rule c: at least 15 minutes of wake, next 4 minutes of sleep get rescored
rescored[t : t + 8] = 0
elif 20 <= wake_bin < 30:
# rule b: at least 10 minutes of wake, next 3 minutes of sleep get rescored
rescored[t : t + 6] = 0
elif 8 <= wake_bin < 20:
# rule a: at least 4 minutes of wake, next 1 minute of sleep gets rescored
rescored[t : t + 2] = 0
wake_bin = 0
else:
if wake_bin >= 15:
# rule c: at least 15 minutes of wake, next 4 minutes of sleep get rescored
rescored[t : t + 4] = 0
elif 10 <= wake_bin < 15:
# rule b: at least 10 minutes of wake, next 3 minutes of sleep get rescored
rescored[t : t + 3] = 0
elif 4 <= wake_bin < 10:
# rule a: at least 4 minutes of wake, next 1 minute of sleep gets rescored
rescored[t : t + 1] = 0
wake_bin = 0
return rescored
def _apply_recording_rules_d_e(rescored: np.ndarray, epoch_length: EPOCH_LENGTH): # pylint:disable=too-many-branches
# rule d/e: 6/10 minutes or less of sleep surrounded by at least 10/20 minutes of wake on each side get rescored
if epoch_length == 30:
sleep_rules = [12, 20]
wake_rules = [20, 40]
else:
sleep_rules = [6, 10]
wake_rules = [10, 20]
for sleep_thres, wake_thres in zip(sleep_rules, wake_rules):
sleep_bin = 0
start_ind = 0
for t in range(wake_thres, len(rescored) - wake_thres):
if rescored[t] == 1:
sleep_bin += 1
if sleep_bin == 1:
start_ind = t
else:
sum1 = np.sum(rescored[start_ind - wake_thres : start_ind])
sum2 = np.sum(rescored[t : t + wake_thres])
if sleep_thres >= sleep_bin > 0 == sum1 and sum2 == 0:
rescored[start_ind:t] = 0
sleep_bin = 0
return rescored
| StarcoderdataPython |
6591413 | <filename>adapter/debugger.py
import sys
import lldb
import codelldb
from value import Value
def evaluate(expr, unwrap=False):
    """Evaluate *expr* in the context of the currently selected lldb frame.

    When *unwrap* is True the result is unwrapped via Value.unwrap before
    being returned.
    """
    exec_context = lldb.SBExecutionContext(lldb.frame)
    value = codelldb.evaluate_in_context(expr, True, exec_context)
    return Value.unwrap(value) if unwrap else value
def wrap(obj):
    # Wrap obj in a Value unless it already is one.
    return obj if type(obj) is Value else Value(obj)
def unwrap(obj):
    # Strip the Value wrapper, returning the underlying object.
    return Value.unwrap(obj)
def display_html(html, title=None, position=None, reveal=False):
    """Display *html* in the debugger UI (delegates to codelldb)."""
    codelldb.display_html(html, title, position, reveal)
def register_type_callback(callback, language=None, type_class_mask=lldb.eTypeClassAny):
    # Kept only to give former users an explicit error instead of an AttributeError.
    raise NotImplementedError('This API has been removed')
def register_content_provider(provider):
    # Kept only to give former users an explicit error instead of an AttributeError.
    raise NotImplementedError('This API has been removed')
def stop_if(cond, handler):
    """Deprecated helper: invoke *handler* when *cond* is truthy.

    Returns True when the handler ran, False otherwise.
    """
    import warnings
    warnings.warn('deprecated', DeprecationWarning)
    if not cond:
        return False
    handler()
    return True
__all__ = ['evaluate', 'wrap', 'unwrap', 'display_html', 'register_type_callback', 'register_content_provider', 'stop_if']
| StarcoderdataPython |
1787353 | <reponame>cmayes/md_utils
#!/usr/bin/env python
"""
Given a file with columns of data (comma or space separated):
return a file that has lines filtered by specified min and max values
"""
from __future__ import print_function
import argparse
import os
import sys
import numpy as np
from md_utils.md_common import (InvalidDataError, warning,
create_out_fname, process_cfg, read_csv_to_list,
list_to_csv, IO_ERROR, GOOD_RET, INPUT_ERROR, INVALID_DATA,
find_files_by_dir)
try:
# noinspection PyCompatibility
from ConfigParser import ConfigParser, NoSectionError, ParsingError
except ImportError:
# noinspection PyCompatibility
from configparser import ConfigParser, NoSectionError, ParsingError
__author__ = 'hbmayes'

# Constants #

# Config File Sections
MAIN_SEC = 'main'
MAX_SEC = 'max_vals'
MIN_SEC = 'min_vals'
SUB_SECTIONS = [MAX_SEC, MIN_SEC]
SECTIONS = [MAIN_SEC] + SUB_SECTIONS

FILE_PAT = 'file_pattern'

# Defaults
DEF_CFG_FILE = 'replace_col.ini'
DEF_ARRAY_FILE = 'column_data.csv'
DEF_DELIMITER = ','
FILTER_HEADERS = 'filter_col_names'
DEF_FILE_PAT = 'seed*csv'
DEF_CFG_VALS = {FILE_PAT: DEF_FILE_PAT}
REQ_KEYS = {}

# NOTE(review): BINS / MOD / QUOT appear unused in this module.
BINS = 'bin_array'
MOD = 'modulo'
QUOT = 'quotient'
def check_vals(config, sec_name):
    """
    Reads the max or min vals section of the given config file,
    returning a dict containing the original string key paired with a float representing the max or min value.
    If there is no specified section, an empty dict is returned. Invalid values result in DataExceptions.
    :param config: The parsed config file that contains a max and/or min section.
    :param sec_name: the name of the section with string/float pairs to digest
    :return: A dict mapping the original column key to the float limit value.
    """
    limit_vals = {}
    # Initialized so the error message below is well-defined even when the
    # failure happens on the very first item.
    limit_val = np.nan
    col_name = None
    try:
        for col_name, limit_val in config.items(sec_name):
            # I don't test for non-unique column name because, if a col_name appears twice, the parser has already
            # handled it by overwriting the value for that key
            limit_vals[col_name] = float(limit_val)
    except NoSectionError:
        # not a problem
        pass
    except ValueError:
        raise InvalidDataError("For section '{}' key '{}', could not convert value '{}' to a float."
                               .format(sec_name, col_name, limit_val, ))
    return limit_vals
def read_cfg(floc, cfg_proc=process_cfg):
    """
    Reads the given configuration file, returning a dict with the converted values supplemented by default values.

    :param floc: The location of the file to read.
    :param cfg_proc: The processor to use for the raw configuration values. Uses default values when the raw
        value is missing.
    :return: A dict of the processed configuration file's data.
    """
    config = ConfigParser()
    try:
        good_files = config.read(floc)
    except ParsingError as e:
        raise InvalidDataError(e)
    if not good_files:
        raise IOError('Could not read file {}'.format(floc))
    main_proc = cfg_proc(dict(config.items(MAIN_SEC)), DEF_CFG_VALS, REQ_KEYS, int_list=False)
    # Check that there is a least one subsection, or this script won't do anything. Check that all sections given
    # are expected or alert user that a given section is ignored (thus catches typos, etc.)
    no_work_to_do = True
    for section in config.sections():
        if section in SECTIONS:
            if section in SUB_SECTIONS:
                if len(config.items(section)) > 0:
                    no_work_to_do = False
        else:
            warning("Found section '{}', which will be ignored. Expected section names are: {}"
                    .format(section, ", ".join(SECTIONS)))
    if no_work_to_do:
        warning("No filtering will be applied as no criteria were found for the expected subsections ({})."
                "".format(", ".join(SUB_SECTIONS)))
    # Convert the string limits of both subsections to floats.
    for section in [MAX_SEC, MIN_SEC]:
        main_proc[section] = check_vals(config, section)
    return main_proc
def parse_cmdline(argv):
    """
    Returns the parsed argument list and return code.
    `argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
    """
    if argv is None:
        argv = sys.argv[1:]
    # initialize the parser object:
    parser = argparse.ArgumentParser(description='Reads in a file containing a header with columns of data. Using '
                                                 'specifications from a configuration file, it changes values in rows '
                                                 'based on column min and/or max values, and overwrites the original '
                                                 'file.')
    parser.add_argument("-c", "--config", help="The location of the configuration file in ini format. "
                                               "The default file name is {}, located in the "
                                               "base directory where the program as run.".format(DEF_CFG_FILE),
                        default=DEF_CFG_FILE, type=read_cfg)
    parser.add_argument("-d", "--delimiter", help="Delimiter separating columns in the FILE to be edited. "
                                                  "The default is: '{}'".format(DEF_DELIMITER),
                        default=DEF_DELIMITER)
    parser.add_argument("-b", "--base_dir", help="The starting point for a file search "
                                                 "(defaults to current directory)",
                        default=os.getcwd())
    parser.add_argument("-f", "--src_file", help="The single file to read from (takes precedence "
                                                 "over base_dir)")
    args = None
    try:
        args = parser.parse_args(argv)
    except IOError as e:
        # type=read_cfg can raise IOError while parsing the --config value.
        warning(e)
        parser.print_help()
        return args, IO_ERROR
    except (InvalidDataError, SystemExit) as e:
        # SystemExit with code 0 means --help was requested: not an error.
        if hasattr(e, 'code') and e.code == 0:
            return args, GOOD_RET
        warning(e)
        parser.print_help()
        return args, INPUT_ERROR
    return args, GOOD_RET
def process_file(data_file, mcfg, delimiter=','):
    """Clamp column values of *data_file* to the min/max limits in *mcfg*
    and write the result as a CSV next to the input."""
    list_vectors, headers = read_csv_to_list(data_file, delimiter=delimiter, header=True)
    # Map each configured column name to its index in this file's header.
    col_index_dict = {}
    for section in SUB_SECTIONS:
        col_index_dict[section] = {}
        for key, val in mcfg[section].items():
            if key in headers:
                # Parser already made sure that unique entries
                col_index_dict[section][headers.index(key)] = val
            else:
                raise InvalidDataError("Key '{}' found in configuration file but not in data file: "
                                       "{}".format(key, data_file))
    edited_vectors = []
    for row in list_vectors:
        # Clamp each configured column to its max / min limit.
        for col, max_val in col_index_dict[MAX_SEC].items():
            if row[col] > max_val:
                row[col] = max_val
        for col, min_val in col_index_dict[MIN_SEC].items():
            if row[col] < min_val:
                row[col] = min_val
        edited_vectors.append(row)
    f_name = create_out_fname(data_file, ext='.csv')
    list_to_csv([headers] + edited_vectors, f_name, delimiter=',')
def main(argv=None):
    """Entry point: parse args, then process a single file or every file
    matching the configured pattern under the base directory."""
    # Read input
    args, ret = parse_cmdline(argv)
    if ret != GOOD_RET or args is None:
        return ret
    cfg = args.config
    try:
        if args.src_file is not None:
            # A single explicit file takes precedence over a directory search.
            process_file(args.src_file, cfg, delimiter=args.delimiter)
        else:
            found_files = find_files_by_dir(args.base_dir, cfg[FILE_PAT])
            # noinspection PyCompatibility
            for f_dir, files in list(found_files.items()):
                if not files:
                    warning("No files found for dir '{}'".format(f_dir))
                    continue
                for csv_path in ([os.path.join(f_dir, tgt) for tgt in files]):
                    process_file(csv_path, cfg, delimiter=args.delimiter)
    except IOError as e:
        warning("Problems reading file:", e)
        return IO_ERROR
    except (ValueError, InvalidDataError) as e:
        warning("Problems reading data:", e)
        return INVALID_DATA
    return GOOD_RET  # success
if __name__ == '__main__':
    # Propagate the return code as the process exit status.
    status = main()
    sys.exit(status)
| StarcoderdataPython |
9697608 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Aodh collectd plugin."""
import logging
try:
    # pylint: disable=import-error
    import collectd
    # pylint: enable=import-error
except ImportError:
    collectd = None  # when running unit tests collectd is not available
import collectd_openstack
from collectd_openstack.aodh.notifier import Notifier
from collectd_openstack.common.logger import CollectdLogHandler
from collectd_openstack.common.meters import MeterStorage
from collectd_openstack.common.settings import Config
# Module logger, plus the package root logger that the collectd log handler
# is attached to in register_plugin().
LOGGER = logging.getLogger(__name__)
ROOT_LOGGER = logging.getLogger(collectd_openstack.__name__)
def register_plugin(collectd):
    """Bind plugin hooks to collectd and vice versa."""
    config = Config.instance()
    # Setup logging: route package log records through collectd's logger.
    log_handler = CollectdLogHandler(collectd=collectd, config=config)
    ROOT_LOGGER.addHandler(log_handler)
    ROOT_LOGGER.setLevel(logging.DEBUG)
    # Creates collectd plugin instance
    instance = Plugin(collectd=collectd, config=config)
    # Register plugin callbacks
    collectd.register_config(instance.config)
    collectd.register_shutdown(instance.shutdown)
    collectd.register_notification(instance.notify)
class Plugin(object):
    """Aodh plugin with collectd callbacks."""
    # NOTE: this is a multithreaded class

    def __init__(self, collectd, config):
        """Plugin instance."""
        self._config = config
        # Meter storage and the notifier that forwards values to Aodh.
        self._meters = MeterStorage(collectd=collectd)
        self._notifier = Notifier(self._meters, config=config)

    def config(self, cfg):
        """Configuration callback.

        @param cfg configuration node provided by collectd
        """
        self._config.read(cfg)

    def notify(self, vl, data=None):
        """Notification callback."""
        LOGGER.info("Notification")
        self._notifier.notify(vl, data)

    def shutdown(self):
        """Shutdown callback."""
        LOGGER.info("SHUTDOWN")
# Register only when running inside collectd (where the import above
# succeeded); under unit tests `collectd` is None and this is skipped.
if collectd:
    register_plugin(collectd=collectd)
| StarcoderdataPython |
3303163 | <reponame>bhrevol/afesta-tools
"""Tests for the vcs module."""
| StarcoderdataPython |
172360 | <reponame>will-fawcett/trackerSW<filename>tracker-visual/geant_fullsim_field.py
# WJF add: parse args
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--geo', type=str, default=None, help='specify compact file for the geometry')
# parse_known_args: ignore any extra options passed by the framework.
args, _ = parser.parse_known_args()
from Gaudi.Configuration import *

# Data service
from Configurables import FCCDataSvc
podioevent = FCCDataSvc("EventDataSvc")

from Configurables import GenAlg, MomentumRangeParticleGun
## Particle Gun using MomentumRangeParticleGun tool and FlatSmearVertex
# MomentumRangeParticleGun generates particles of given type(s) within given momentum, phi and theta range
# FlatSmearVertex smears the vertex with uniform distribution
pgun_tool = MomentumRangeParticleGun(PdgCodes=[13, -13])  # PDG 13 / -13: mu- / mu+
pgun_tool.ThetaMin = 0.0
pgun_tool.ThetaMax = 2*3.14
gen = GenAlg("ParticleGun", SignalProvider=pgun_tool, VertexSmearingTool="FlatSmearVertex")
gen.hepmc.Path = "hepmc"

from Configurables import Gaudi__ParticlePropertySvc
## Particle service
# list of possible particles is defined in ParticlePropertiesFile
ppservice = Gaudi__ParticlePropertySvc("ParticlePropertySvc", ParticlePropertiesFile="Generation/data/ParticleTable.txt")
# DD4hep geometry service
# Parses the given xml file
from Configurables import GeoSvc
# WJF edits
# Default location / file for the tracker compact description, used when
# --geo is not given on the command line.
pathToXML = 'file:Detector/DetFCChhTrackerTkLayout/compact/'
myFile = "FCCtriplet_1barrel30mm.xml"
# Fix: previously 'file:' + args.geo raised a TypeError whenever --geo was
# omitted (default None); fall back to the default compact file instead.
if args.geo:
    myFilePath = 'file:' + args.geo
else:
    myFilePath = pathToXML + myFile
geoservice = GeoSvc("GeoSvc", detectors=['file:Detector/DetFCChhBaseline1/compact/FCChh_DectEmptyMaster.xml',
                                         myFilePath
                                         ],
                    OutputLevel = DEBUG)
from Configurables import HepMCToEDMConverter
## Reads an HepMC::GenEvent from the data service and writes a collection of EDM Particles
hepmc_converter = HepMCToEDMConverter("Converter")
hepmc_converter.hepmc.Path="hepmc"
hepmc_converter.genparticles.Path="allGenParticles"
hepmc_converter.genvertices.Path="allGenVertices"

# Geant4 service
# Configures the Geant simulation: geometry, physics list and user actions
from Configurables import SimG4Svc
# giving the names of tools will initialize the tools of that type
geantservice = SimG4Svc("SimG4Svc", detector='SimG4DD4hepDetector', physicslist="SimG4FtfpBert", actions="SimG4FullSimActions")

from Configurables import SimG4ConstantMagneticFieldTool
# Constant magnetic field, integrated with a classical Runge-Kutta stepper.
field = SimG4ConstantMagneticFieldTool("SimG4ConstantMagneticFieldTool", FieldOn=True, IntegratorStepper="ClassicalRK4")
# Geant4 algorithm
# Translates EDM to G4Event, passes the event to G4, writes out outputs via tools
from Configurables import SimG4Alg, SimG4SaveTrackerHits, SimG4PrimariesFromEdmTool
# first, create a tool that saves the tracker hits
# Name of that tool in GAUDI is "XX/YY" where XX is the tool class name ("SimG4SaveTrackerHits")
# and YY is the given name ("saveTrackerHits")
savetrackertool = SimG4SaveTrackerHits("saveTrackerHits", readoutNames = ["TrackerBarrelReadout", "TrackerEndcapReadout"])
savetrackertool.positionedTrackHits.Path = "positionedHits"
savetrackertool.trackHits.Path = "hits"
# next, create the G4 algorithm, giving the list of names of tools ("XX/YY")
particle_converter = SimG4PrimariesFromEdmTool("EdmConverter")
particle_converter.genParticles.Path = "allGenParticles"
geantsim = SimG4Alg("SimG4Alg",
                    outputs = ["SimG4SaveTrackerHits/saveTrackerHits"],
                    eventProvider=particle_converter)

# PODIO algorithm
from Configurables import PodioOutput
out = PodioOutput("out",
                  OutputLevel=DEBUG)
out.outputCommands = ["keep *"]
#out.filename = "tracker_with_field.root"
# Derive the output name from the geometry file, e.g. Foo.xml -> Foo_hits.root
out.filename = myFilePath.split('/')[-1].replace('.xml','_hits.root')
# ApplicationMgr: top-level Gaudi configuration running 5000 generated events.
from Configurables import ApplicationMgr
ApplicationMgr( TopAlg = [gen, hepmc_converter, geantsim, out],
                EvtSel = 'NONE',
                EvtMax = 5000,
                # order is important, as GeoSvc is needed by SimG4Svc
                ExtSvc = [ppservice, podioevent, geoservice, geantservice],
                OutputLevel=DEBUG
)
| StarcoderdataPython |
193247 | """empty message
Revision ID: 5ae213c6353
Revises: 3c73f5517a2
Create Date: 2015-07-01 16:08:50.298141
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '3c73f5517a2'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Relax pictures.description to be nullable.

    Uses batch_alter_table (required on SQLite, where ALTER COLUMN is not
    supported): the table is rebuilt, so every index is dropped beforehand
    and re-created afterwards.
    """
    op.drop_index('ix_pictures_path_to_image')
    op.drop_index('ix_pictures_path_to_thumbnail')
    op.drop_index('ix_pictures_name')
    op.drop_index('ix_pictures_description')
    op.drop_index('ix_pictures_upload_date')
    op.drop_index('ix_pictures_size')
    # reflect_args pins the reflected type of the altered column so the
    # rebuilt table keeps VARCHAR for description.
    with op.batch_alter_table(
        'pictures',
        reflect_args=[sa.Column(
            'description',
            sa.VARCHAR())]
    ) as batch_op:
        batch_op.alter_column('description',
                              existing_type=sa.VARCHAR(),
                              nullable=True)
    # Re-create the indexes dropped above (the table rebuild discarded them).
    op.create_index(
        op.f('ix_pictures_path_to_image'),
        'pictures', ['path_to_image'], unique=False)
    op.create_index(
        op.f('ix_pictures_path_to_thumbnail'),
        'pictures', ['path_to_thumbnail'], unique=False)
    op.create_index(op.f('ix_pictures_name'), 'pictures', ['name'],
                    unique=False)
    op.create_index(op.f('ix_pictures_description'), 'pictures', ['description'],
                    unique=False)
    op.create_index(
        op.f('ix_pictures_upload_date'),
        'pictures',
        ['upload_date'],
        unique=False)
    op.create_index(op.f('ix_pictures_size'), 'pictures', ['size'], unique=False)
def downgrade():
    """Revert: make pictures.description NOT NULL again.

    Mirrors upgrade(): all indexes are dropped before the batch table
    rebuild and re-created afterwards.  The original version forgot the
    re-creation step, so a downgrade left the pictures table with no
    indexes at all.
    """
    op.drop_index('ix_pictures_path_to_image')
    op.drop_index('ix_pictures_path_to_thumbnail')
    op.drop_index('ix_pictures_name')
    op.drop_index('ix_pictures_description')
    op.drop_index('ix_pictures_upload_date')
    op.drop_index('ix_pictures_size')
    with op.batch_alter_table(
        'pictures',
        reflect_args=[sa.Column(
            'description',
            sa.VARCHAR())]
    ) as batch_op:
        batch_op.alter_column('description',
                              existing_type=sa.VARCHAR(),
                              nullable=False)
    # Fixed: re-create the indexes dropped above, exactly as upgrade() does.
    op.create_index(
        op.f('ix_pictures_path_to_image'),
        'pictures', ['path_to_image'], unique=False)
    op.create_index(
        op.f('ix_pictures_path_to_thumbnail'),
        'pictures', ['path_to_thumbnail'], unique=False)
    op.create_index(op.f('ix_pictures_name'), 'pictures', ['name'],
                    unique=False)
    op.create_index(op.f('ix_pictures_description'), 'pictures', ['description'],
                    unique=False)
    op.create_index(
        op.f('ix_pictures_upload_date'),
        'pictures',
        ['upload_date'],
        unique=False)
    op.create_index(op.f('ix_pictures_size'), 'pictures', ['size'], unique=False)
| StarcoderdataPython |
11387360 | import wpilib
import wpilib.drive
ENCODER_ROTATION = 1023  # encoder ticks per wheel rotation (not referenced in this file -- TODO confirm use elsewhere)
WHEEL_DIAMETER = 7.639   # wheel diameter, presumably inches (not referenced in this file -- TODO confirm)
SARAH_MULTIPLIER = 0.5   # global speed scale applied when Sarah is driving (see Drive.move)
class Drive:
    """Arcade-drive component.

    Verb methods such as move() only *record* the requested motion;
    execute() is the single place that talks to the drivetrain.  This lets
    several callers in the control loop request motion without fighting
    over the motors directly.
    """

    # Injected by the robot framework; declared here by type.
    drivetrain = wpilib.drive.DifferentialDrive

    def __init__(self):
        self.enabled = False

    def on_enable(self):
        # Clear any stale motion request when the robot is (re-)enabled.
        self.y = 0
        self.rotation = 0

    def move(self, y, rotation, sarah=False):
        """
        Causes the robot to move

        :param y: The speed that the robot should drive in the Y direction.
        :param rotation: The rate of rotation for the robot that is completely independent of the translation.
        :param sarah: Is Sarah driving?
        """
        if sarah:
            # Soften both axes for Sarah.
            y, rotation = y * SARAH_MULTIPLIER, rotation * SARAH_MULTIPLIER
        self.y, self.rotation = y, rotation

    def execute(self):
        """Actually drive, then reset the request so the robot stops by default."""
        self.drivetrain.arcadeDrive(self.y, self.rotation)
        self.y, self.rotation = 0, 0
| StarcoderdataPython |
8176954 | from typing import Optional
import colorful as cf
from kolga.utils.models import SubprocessResult
class Logger:
    """
    Class for logging of events in the DevOps pipeline
    """

    def _create_message(self, message: str, icon: Optional[str] = None) -> str:
        # Prefix the message with "<icon> " when an icon is given.
        icon_string = f"{icon} " if icon else ""
        return f"{icon_string}{message}"

    def error(
        self,
        message: str = "",
        icon: Optional[str] = None,
        error: Optional[Exception] = None,
        raise_exception: bool = True,
    ) -> None:
        """
        Log formatted errors to stdout and optionally raise them

        Args:
            message: Verbose/Custom error message of the exception
            icon: Icon to place as before the output
            error: Exception should be logged and optionally raised
            raise_exception: If True, raise `error` if passed, otherwise raise `Exception`
        """
        message_string = message if message else "An error occured"
        _message = self._create_message(message_string, icon)
        if error and not raise_exception:
            # Only append the exception text when we are not going to raise it.
            _message += f"{error}"
        print(f"{cf.red}{_message}{cf.reset}")  # noqa: T001
        if raise_exception:
            error = error or Exception(message_string)
            raise error

    def warning(self, message: str, icon: Optional[str] = None) -> None:
        """
        Log formatted warnings to stdout

        Args:
            message: Verbose/Custom error message of the exception
            icon: Icon to place as before the output
        """
        _message = self._create_message(message, icon)
        print(f"{cf.yellow}{_message}{cf.reset}")  # noqa: T001

    def success(self, message: str = "", icon: Optional[str] = None) -> None:
        """
        Log formatted successful events to stdout

        Args:
            message: Verbose/Custom error message of the exception
            icon: Icon to place as before the output
        """
        message_string = message if message else "Done"
        _message = self._create_message(message_string, icon)
        print(f"{cf.green}{_message}{cf.reset}")  # noqa: T001

    def info(
        self,
        message: str = "",
        title: str = "",
        icon: Optional[str] = None,
        end: str = "\n",
    ) -> None:
        """
        Log formatted info events to stdout

        Args:
            title: Title of the message, printed in bold
            message: Verbose/Custom error message of the exception
            icon: Icon to place as before the output
            end: Ending char of the message, for controlling new line for instance
        """
        message_string = (
            f"{cf.bold}{title}{cf.reset}{message}" if title else f"{message}"
        )
        _message = self._create_message(message_string, icon)
        print(f"{_message}", end=end, flush=True)  # noqa: T001

    def std(
        self,
        std: SubprocessResult,
        raise_exception: bool = False,
        log_error: bool = True,
    ) -> None:
        """
        Log results of :class:`SubprocessResult` warnings to stdout

        Args:
            std: Result from a subprocess call
            raise_exception: If True, raise `Exception`
            log_error: If True, log the error part of the result with :func:`~Logger.error`
        """
        if log_error:
            # Fixed: call through ``self`` instead of the module-level
            # ``logger`` singleton, so subclasses and test doubles that
            # override error() behave correctly.
            self.error(message=std.err, raise_exception=False)
        output_string = f"\n{cf.green}stdout:\n{cf.reset}{std.out}\n{cf.red}stderr:\n{cf.reset}{std.err}"
        if raise_exception:
            raise Exception(output_string)
        else:
            print(output_string)  # noqa: T001


logger = Logger()
| StarcoderdataPython |
3271217 | <filename>World Finals/gallery-of-pillars.py
# Copyright (c) 2019 kamyu. All rights reserved.
#
# Google Code Jam 2016 World Finals - Problem C. Gallery of Pillars
# https://code.google.com/codejam/contest/7234486/dashboard#s=p2
#
# Time: O(NlogN)
# Space: O(M)
#
from math import sqrt
def count(side_len, r_square):  # Time: O(side_len), Space: O(1)
    """Count lattice points (x, y) with x*x + y*y <= r_square,
    0 <= x, y <= min(side_len, int(sqrt(r_square))), excluding (0, 0).

    Two-pointer sweep along the quarter-circle boundary: y only ever
    decreases while x increases, so the whole loop is O(side_len).
    Fixed: uses range() instead of the Python-2-only xrange(), keeping the
    solution runnable under both Python 2 and 3.
    """
    result = 0
    y = side_len
    if r_square < y*y:
        # NOTE(review): float sqrt, assumed precise enough for the given
        # bounds -- math.isqrt would be exact but is Python 3.8+ only.
        y = int(sqrt(r_square))
    for x in range(y+1):
        while x*x + y*y > r_square:
            y -= 1
        result += y+1  # column x contributes (x, 0) .. (x, y)
    return result-1  # exclude the origin (0, 0)
def gallery_of_pillars():
    """Solve one test case (Python 2; reads "N R" from stdin).

    Counts points (x, y) with 0 <= x, y <= N-1, gcd(x, y) = 1 and
    |(x, y)| < M/R, via Mobius inversion over the common divisor d.
    """
    N, R = map(int, raw_input().strip().split())
    # count pairs of |(x, y)| < M/R and 0 <= x, y <= N-1 and gcd(x, y) = 1
    result = 0
    # Integer form of the strict bound: |v|^2 * R^2 < M^2  <=>  |v|^2 <= (M^2-1)//R^2
    r_square = (M*M-1)//(R*R)
    # Sum over d of O(N/d) work is O(NlogN) overall (harmonic series).
    for d in xrange(1, min(N-1, int(sqrt(r_square)))+1):
        if MU[d]:  # Mobius inversion, see https://artofproblemsolving.com/wiki/index.php/Mobius_function
            result += MU[d] * count((N-1)//d, r_square//(d*d))
    return result
def sieve_of_eratosthenes(n):  # Time: O(nlog(logn)), Space: O(n)
    """Return mu[0..n-1], the Mobius function computed by a sieve.

    mu[k] is +1 for squarefree k with an even number of prime factors,
    -1 for squarefree k with an odd number, and 0 when a square divides k.
    (mu[0] is meaningless and left at 1; mu[1] == 1 by convention.)
    Fixed: uses range() instead of the Python-2-only xrange().
    """
    is_prime = [True]*n
    mu = [1]*n
    for i in range(2, n):
        if not is_prime[i]:
            continue
        # i is prime: mark its proper multiples composite.
        for j in range(i+i, n, i):
            is_prime[j] = False
        # Flip the sign of every multiple of the prime i.
        for j in range(i, n, i):
            mu[j] = -mu[j]
        # Zero out multiples of i*i (not squarefree); the guard just skips
        # the inner loop when i*i >= n.
        if i <= n//i:
            for j in range(i*i, n, i*i):
                mu[j] = 0
    return mu
M = 10**6  # scale constant for the distance bound M/R (see gallery_of_pillars)
MU = sieve_of_eratosthenes(M)  # Mobius values precomputed once, shared by all cases
# Python 2 entry point: input() evaluates the first stdin line (case count).
for case in xrange(input()):
    print 'Case #%d: %s' % (case+1, gallery_of_pillars())
| StarcoderdataPython |
71914 | from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import traceback, sys
import logging
# NOTE(review): logging.Handler.emit may be invoked from any thread, but Qt
# widgets must only be touched from the GUI thread.  Appending to the widget
# directly here is therefore not thread-safe -- route records through a
# signal/slot connection if they can originate from worker threads.
class QtHandler(logging.Handler):
    """Logging handler that appends formatted records to a Qt text widget."""

    def __init__(self, output_widget: QTextEdit):
        logging.Handler.__init__(self)
        self.widget: QTextEdit = output_widget
        self.widget.setReadOnly(True)

    def emit(self, record):
        """Format *record* and append it to the widget."""
        try:
            msg = self.format(record)
            # NOTE(review): appendPlainText() is a QPlainTextEdit method, not
            # QTextEdit -- confirm the widget type actually passed by callers.
            self.widget.appendPlainText(msg)
        except Exception:
            # Fixed: was a bare ``except:`` printing "something bad happened";
            # delegate to the standard logging error machinery instead.
            self.handleError(record)
def setup_logs(logger, output_widget=None):
    """Attach a GUI handler and a console handler to *logger*.

    Args:
        logger: the logging.Logger instance to configure.
        output_widget: text widget that receives log lines via QtHandler.
    """
    # GUI handler
    qtLogHandler = QtHandler(output_widget)
    qtLogHandler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
    logger.addHandler(qtLogHandler)
    # Fixed: the original rebound ``logger = logging.getLogger(__name__)``
    # here, so the console handler and the DEBUG level below were applied to
    # this module's logger instead of the logger passed in by the caller.
    # Console handler
    syslog = logging.StreamHandler()
    formatter = logging.Formatter('[%(asctime)s.%(msecs)03d %(threadName)10s] -- %(funcName)20s() -> %(levelname)5s: %(message)s',"%H:%M:%S")
    syslog.setFormatter(formatter)
    logger.addHandler(syslog)
    logger.setLevel(logging.DEBUG)
# thanks https://www.learnpyqt.com/courses/concurrent-execution/multithreading-pyqt-applications-qthreadpool/
class WorkerSignals(QObject):
    '''
    Defines the signals available from a running worker thread.

    Supported signals are:

    finished
        No data

    error
        `tuple` (exctype, value, traceback.format_exc() )

    result
        `object` data returned from processing, anything

    progress
        `int` indicating % progress

    '''
    finished = pyqtSignal()    # emitted when run() ends, success or failure
    error = pyqtSignal(tuple)  # (exctype, value, formatted traceback)
    result = pyqtSignal(object)  # the callback's return value
    progress = pyqtSignal(int)   # percent progress (wiring in Worker is commented out)
class Worker(QRunnable):
    '''
    Worker thread

    Inherits from QRunnable to handler worker thread setup, signals and wrap-up.

    :param callback: The function callback to run on this worker thread. Supplied args and
                     kwargs will be passed through to the runner.
    :type callback: function
    :param args: Arguments to pass to the callback function
    :param kwargs: Keywords to pass to the callback function
    '''

    def __init__(self, fn, *args, **kwargs):
        super(Worker, self).__init__()
        # Store constructor arguments (re-used for processing)
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
        self.signals = WorkerSignals()
        # To report progress, add ``self.signals.progress`` to ``kwargs``
        # here and have the callback emit on it (left disabled, as in the
        # original tutorial code).

    @pyqtSlot()
    def run(self):
        '''
        Run the stored callback with the stored args/kwargs and report the
        outcome through ``self.signals``.
        '''
        try:
            # Fixed: removed the stray debug print of (fn, args, kwargs).
            result = self.fn(*self.args, **self.kwargs)
        except Exception:
            # Fixed: was a bare ``except:`` which also trapped SystemExit and
            # KeyboardInterrupt; report the failure to listeners instead.
            traceback.print_exc()
            exctype, value = sys.exc_info()[:2]
            self.signals.error.emit((exctype, value, traceback.format_exc()))
        else:
            self.signals.result.emit(result)  # Return the result of the processing
        finally:
            self.signals.finished.emit()  # Done
| StarcoderdataPython |
from SQLiteExperiment import SQLiteExperiment

# Earlier subclass-based approach, kept for reference:
#class MyExperiment(SQLiteExperiment):
#    def __init__(self):
#        inputs = ['alpha','beta','gamma']
#        outputs = ['a','b','c']
#        super().__init__(inputs,outputs,overwrite = True)

# Toy model mapping the three inputs to the three outputs.
# NOTE(review): 'c' divides by beta, so beta must never be 0 for any sampled
# point -- confirm the sweeps below cannot reach beta == 0.
compute = lambda v: {
    'a': -v['alpha']-2*v['beta']-2*v['gamma']+6,
    'b': -v['alpha']-2*v['beta']+v['gamma'],
    'c': v['alpha']-3*v['gamma']/v['beta']
}

experiment = SQLiteExperiment(['alpha','beta','gamma'],['a','b','c'],computeFunction=compute,overwrite=True)
experiment.build()
# add(name, swept_variable, base_point, step, n_steps, description):
# presumably sweeps one variable while the others stay at the base point --
# confirm against SQLiteExperiment.add's signature.
experiment.add('change alpha','alpha',{'alpha':1,'beta':1,'gamma':1},0.25,10,'study to how alpha value affact the output a,b,c')
experiment.add('varie beta','beta',{'alpha':1,'beta':1,'gamma':1},0.25,10,'How beta variable change output a,b,c ')
experiment.add('gamma observe','gamma',{'alpha':1,'beta':1,'gamma':1},0.25,10,'output a,b,c that create from difference gamma value')
experiment.run()
experiment.plot()
| StarcoderdataPython |
8053807 | <gh_stars>0
# Import all the necessary libraries
import os
import datetime
import glob
import random
import sys
import matplotlib.pyplot as plt
import skimage.io #Used for imshow function
import skimage.transform #Used for resize function
from skimage.morphology import label #Used for Run-Length-Encoding RLE to create final submission
import numpy as np
import pandas as pd
import keras
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, Conv2DTranspose
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D, Lambda
from keras.layers.advanced_activations import LeakyReLU
from keras.models import load_model, Model
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.merge import add, concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.utils import multi_gpu_model, plot_model
from keras import backend as K
import tensorflow as tf
import sklearn
from sklearn.model_selection import train_test_split
# Set number of GPUs
num_gpus = 1 #defaults to 1 if one-GPU or one-CPU. If 4 GPUs, set to 4.
# Set height (y-axis length) and width (x-axis length) to train model on
img_height, img_width = (128,128) #Default to (256,266), use (None,None) if you do not want to resize imgs
from tensorflow.python.client import device_lib
from keras.losses import binary_crossentropy
import keras.optimizers as optimizers
from keras.models import load_model
import numpy as np
import shutil
import keras
import os
import data_loader
import train
import model
import utils
keras.backend.set_image_data_format('channels_last')
"""
TODO:
- If test images empty: make sure it works.
"""
MODE = "GPU" if "GPU" in [k.device_type for k in device_lib.list_local_devices()] else "CPU"
print(MODE)
"""
This script is a demo of how to use the diffrent functions from https://github.com/lebrat/Biolapse to train a neural network
to segment images with temporal information.
Parameters:
- epoch: number of iteration in learning phase.
- lr: learning rate, step-size of the optimization algorithm.
- momentum: weight of previous iteration.
- decay: weight of the penalization of the weights. Tends to have more small value weights.
- steps_per_epoch: number of iteration within an iteration.
- batch_size: number of sample used to aggregate the descent step in optimization method. If GPU
runs out of memory, this might be because batch_size is too large.
- type_im: np.uint8 or np.uint16. Used to properly load images.
- (nx, ny): spatial shape of images used to train neural network. If GPU runs out of memory, this might
be because nx and/or ny is too large.
- TIME: size of the temporal sample use to predict segmentation. Large value of TIME might lead to better
results but it requires GPU with large memory.
- path_train, path_test: folders containin training and testing images. Can contain many inside folders,
at the end masks should be in a folder name 'masks' and images in a folder 'images' at the same level.
- model: 'Unet3D', 'Unet2D' or 'LSTM' neural network model.
Output:
Save in Data/Model/Model a file name_save+'.h5' containing the neural network. Validation and training
informations are stored in Data/Model/Information/name_save+'.p'.
"""
## Parameters (meanings documented in the module docstring above)
# NOTE(review): epoch, lr, momentum, decay, steps_per_epoch, TIME, name_save
# and model_name are not referenced by the visible remainder of this script
# (training below hardcodes batch_size=16 and epochs=30) -- presumably
# leftovers from the Biolapse demo; confirm before removing.
epoch = 250
lr = 1e-2
momentum = 0.8
decay = 1e-6
steps_per_epoch = 100
batch_size = 4
type_im = np.uint16  # on-disk pixel type of the PNGs loaded below
nx = ny = 128        # spatial size the loader resizes images to
TIME = 10
path_train = '../Data/Segmentation/Train2D'
path_test = '../Data/Segmentation/Test2D'
name_save = 'nn_unet2D'
model_name = 'Unet2D' # 'LSTM'
# Data: load (image, mask) pairs for train and test, then scale every
# sample to [0, 1] by its own maximum.
X_train, Y_train = data_loader.path_to_batchsV2(path_train,nx,ny,type_im=type_im,format_im='png',
    code_im1='images',code_im2='masks')
X_train = np.array(X_train, dtype=np.float32)
Y_train = np.array(Y_train, dtype=np.float32)
for t in range(X_train.shape[0]):
    # The max() guards avoid division by zero on all-black frames/masks.
    if np.max(X_train[t])!=0:
        X_train[t] = X_train[t]/np.max(X_train[t])
    if np.max(Y_train[t])!=0:
        Y_train[t] = Y_train[t]/np.max(Y_train[t])
X_test, Y_test = data_loader.path_to_batchsV2(path_test,nx,ny,type_im=type_im,format_im='png',
    code_im1='images',code_im2='masks')
X_test = np.array(X_test, dtype=np.float32)
Y_test = np.array(Y_test, dtype=np.float32)
for t in range(X_test.shape[0]):
    if np.max(X_test[t])!=0:
        X_test[t] = X_test[t]/np.max(X_test[t])
    if np.max(Y_test[t])!=0:
        Y_test[t] = Y_test[t]/np.max(Y_test[t])
# Illustrate the train images and masks: a y-row by x-column grid, each cell
# showing an image with its mask directly underneath (samples taken every
# 10th index).
plt.figure(figsize=(20,16))
x, y = 12,4
for i in range(y):
    for j in range(x):
        # train image
        plt.subplot(y*2, x, i*2*x+j+1)
        pos = i*120 + j*10
        plt.imshow(np.squeeze(X_train[pos]))
        plt.title('Image #{}'.format(pos))
        plt.axis('off')
        # matching mask, one grid row below
        plt.subplot(y*2, x, (i*2+1)*x+j+1)
        plt.imshow(np.squeeze(Y_train[pos]))
        plt.title('Mask #{}'.format(pos))
        plt.axis('off')
#plt.subplots_adjust(wspace=0.1, hspace=0.1)
plt.show(False)
# Design our model architecture here
def keras_model(img_width=256, img_height=256):
    '''
    Build a U-Net style encoder/decoder for single-channel mask prediction.
    Modified from https://keunwoochoi.wordpress.com/2017/10/11/u-net-on-keras-2-0/

    Args:
        img_width, img_height: spatial dimensions of the grayscale input.

    Returns:
        An uncompiled keras Model mapping one-channel images to one-channel
        sigmoid mask predictions of the same spatial size.
    '''
    n_ch_exps = [4, 5, 6, 7, 8, 9]  # the n-th deep channel's exponent i.e. 2**n: 16,32,64,128,256,512
    k_size = (3, 3)       # size of filter kernel
    k_init = 'he_normal'  # kernel initializer

    if K.image_data_format() == 'channels_first':
        ch_axis = 1
        # Fixed: this pipeline feeds single-channel (grayscale) images, so the
        # channels_first shape must use 1 channel to match the channels_last
        # branch below (the original inconsistently used 3 here).
        input_shape = (1, img_width, img_height)
    elif K.image_data_format() == 'channels_last':
        ch_axis = 3
        input_shape = (img_width, img_height, 1)

    inp = Input(shape=input_shape)
    encodeds = []

    # Encoder: conv-dropout-conv blocks with doubling channel counts,
    # max-pooled between blocks; skip outputs are kept for the decoder.
    enc = inp
    for l_idx, n_ch in enumerate(n_ch_exps):
        enc = Conv2D(filters=2**n_ch, kernel_size=k_size, activation='relu', padding='same', kernel_initializer=k_init)(enc)
        enc = Dropout(0.1*l_idx)(enc)
        enc = Conv2D(filters=2**n_ch, kernel_size=k_size, activation='relu', padding='same', kernel_initializer=k_init)(enc)
        encodeds.append(enc)
        if n_ch < n_ch_exps[-1]:  # do not run max pooling on the last encoding/downsampling step
            enc = MaxPooling2D(pool_size=(2,2))(enc)

    # Decoder: transpose-conv upsampling with skip connections to the
    # matching encoder block.
    dec = enc
    decoder_n_chs = n_ch_exps[::-1][1:]
    for l_idx, n_ch in enumerate(decoder_n_chs):
        l_idx_rev = len(n_ch_exps) - l_idx - 2  # index of the matching encoder output
        dec = Conv2DTranspose(filters=2**n_ch, kernel_size=k_size, strides=(2,2), activation='relu', padding='same', kernel_initializer=k_init)(dec)
        dec = concatenate([dec, encodeds[l_idx_rev]], axis=ch_axis)
        dec = Conv2D(filters=2**n_ch, kernel_size=k_size, activation='relu', padding='same', kernel_initializer=k_init)(dec)
        dec = Dropout(0.1*l_idx)(dec)
        dec = Conv2D(filters=2**n_ch, kernel_size=k_size, activation='relu', padding='same', kernel_initializer=k_init)(dec)

    outp = Conv2DTranspose(filters=1, kernel_size=k_size, activation='sigmoid', padding='same', kernel_initializer='glorot_normal')(dec)

    model = Model(inputs=[inp], outputs=[outp])
    return model
# Set some model compile parameters
optimizer = 'adam'
loss = utils.bce_dice_loss   # combined BCE + dice loss from the project utils
metrics = [utils.mean_iou]   # TF-side IoU metric (cal_iou below re-checks in NumPy)
# Compile our model
model = keras_model(img_width=img_width, img_height=img_height)
model.summary()
# For more GPUs
if num_gpus > 1:
    model = multi_gpu_model(model, gpus=num_gpus)
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
seed=42  # shared RNG seed for the train/val split and the augmentation flows
# Runtime data augmentation
def get_train_test_augmented(X_data=X_train, Y_data=Y_train, validation_split=0.1, batch_size=32, seed=seed):
    """Split (X_data, Y_data) into train/val and build augmented generators.

    Returns (train_generator, test_generator, X_train, X_test, Y_train, Y_test).
    NOTE(review): the defaults bind the module-level X_train/Y_train arrays at
    definition time; callers in this script always pass them explicitly.
    """
    X_train, X_test, Y_train, Y_test = train_test_split(X_data,
                                                        Y_data,
                                                        train_size=1-validation_split,
                                                        test_size=validation_split,
                                                        random_state=seed)

    # Image data generator distortion options
    data_gen_args = dict(rotation_range=45.,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         vertical_flip=True,
                         fill_mode='reflect')  #use 'constant'??

    # Train data: provide the same seed and keyword arguments to the fit and
    # flow methods so image and mask transforms stay in lockstep.
    X_datagen = ImageDataGenerator(**data_gen_args)
    Y_datagen = ImageDataGenerator(**data_gen_args)
    X_datagen.fit(X_train, augment=True, seed=seed)
    Y_datagen.fit(Y_train, augment=True, seed=seed)
    X_train_augmented = X_datagen.flow(X_train, batch_size=batch_size, shuffle=True, seed=seed)
    Y_train_augmented = Y_datagen.flow(Y_train, batch_size=batch_size, shuffle=True, seed=seed)

    # Test data: no data augmentation, but we create a generator anyway so
    # both splits are consumed the same way.
    X_datagen_val = ImageDataGenerator()
    Y_datagen_val = ImageDataGenerator()
    X_datagen_val.fit(X_test, augment=True, seed=seed)
    Y_datagen_val.fit(Y_test, augment=True, seed=seed)
    X_test_augmented = X_datagen_val.flow(X_test, batch_size=batch_size, shuffle=True, seed=seed)
    Y_test_augmented = Y_datagen_val.flow(Y_test, batch_size=batch_size, shuffle=True, seed=seed)

    # combine generators into one which yields image and masks
    train_generator = zip(X_train_augmented, Y_train_augmented)
    test_generator = zip(X_test_augmented, Y_test_augmented)

    return train_generator, test_generator, X_train, X_test, Y_train, Y_test
# Runtime custom callbacks
#%% https://github.com/deepsense-ai/intel-ai-webinar-neural-networks/blob/master/live_loss_plot.py
# Fixed code to enable non-flat loss plots on keras model.fit_generator()
import matplotlib.pyplot as plt
from keras.callbacks import Callback
from IPython.display import clear_output
#from matplotlib.ticker import FormatStrFormatter
def translate_metric(x):
    """Map a Keras metric key to a human-readable plot title.

    Unknown keys are returned unchanged.
    """
    translations = {'acc': "Accuracy", 'loss': "Log-loss (cost function)"}
    return translations.get(x, x)
class PlotLosses(Callback):
    """Keras callback that re-draws training/validation metric curves after
    every epoch (intended for notebook use; clears the previous output)."""

    def __init__(self, figsize=None):
        super(PlotLosses, self).__init__()
        self.figsize = figsize  # forwarded to plt.figure

    def on_train_begin(self, logs=None):
        # Fixed: ``logs=None`` instead of the mutable ``{}`` default argument.
        # Keep only the base metrics; their 'val_' twins are drawn alongside.
        self.base_metrics = [metric for metric in self.params['metrics'] if not metric.startswith('val_')]
        self.logs = []

    def on_epoch_end(self, epoch, logs=None):
        self.logs.append(dict(logs or {}))  # copy, so later mutation by Keras is harmless
        clear_output(wait=True)
        plt.figure(figsize=self.figsize)

        for metric_id, metric in enumerate(self.base_metrics):
            plt.subplot(1, len(self.base_metrics), metric_id + 1)
            epochs = range(1, len(self.logs) + 1)
            plt.plot(epochs,
                     [log[metric] for log in self.logs],
                     label="training")
            if self.params['do_validation']:
                plt.plot(epochs,
                         [log['val_' + metric] for log in self.logs],
                         label="validation")
            plt.title(translate_metric(metric))
            plt.xlabel('epoch')
            plt.legend(loc='center left')

        plt.tight_layout()
        # Positional False maps to show(block=False): keep training running.
        plt.show(False)
plot_losses = PlotLosses(figsize=(16, 4))  # live-plot callback defined above

# Finally train the model!!
batch_size = 16  # NOTE(review): overrides the batch_size = 4 set in the parameter block
train_generator, test_generator, X_train, X_val, Y_train, Y_val = get_train_test_augmented(X_data=X_train, Y_data=Y_train, validation_split=0.1, batch_size=batch_size)
# increase epoch on your own machine
# NOTE(review): validation_steps/steps_per_epoch are floats here (``/``);
# newer Keras versions require integers -- confirm against the version in use.
model.fit_generator(train_generator, validation_data=test_generator, validation_steps=batch_size/2, steps_per_epoch=len(X_train)/(batch_size*2), epochs=30, callbacks=[plot_losses])

# Save the model weights to a hdf5 file
if num_gpus > 1:
    #Refer to https://stackoverflow.com/questions/41342098/keras-load-checkpoint-weights-hdf5-generated-by-multiple-gpus
    #model.summary()
    model_out = model.layers[-2] #get second last layer in multi_gpu_model i.e. model.get_layer('model_1')
else:
    model_out = model
model_out.save_weights(filepath="model-weights.hdf5")

# Reload the model into a fresh single-device graph and restore the weights.
model = keras_model(img_width=img_width, img_height=img_height)
model.load_weights("model-weights.hdf5")

# Predict on val
preds_val = model.predict(X_val, verbose=1)
# Threshold predictions at 0.5 to obtain boolean masks.
#preds_val_t = (preds_val > 0.5).astype(np.uint8)
preds_val_t = (preds_val > 0.5)
# Define IoU metric as a regular function, to manually check result
def cal_iou(A, B):
    """Return the intersection-over-union of two binary masks.

    Args:
        A, B: array-likes interpreted element-wise as booleans (nonzero = on).

    Returns:
        float in [0, 1].  Fixed: when both masks are empty the union is 0 and
        the original returned NaN (0/0), which poisoned the averaged score;
        that case is now defined as perfect agreement, 1.0.
    """
    intersection = np.logical_and(A, B)
    union = np.logical_or(A, B)
    n_union = np.sum(union > 0)
    if n_union == 0:
        return 1.0
    return np.sum(intersection > 0) / n_union
# Calculate the average IoU of the validation images as a NumPy cross-check
# (the TF-side mean_iou metric "seems too high" per the original author).
iou=[]
for i in range(len(Y_val)):
    iou.append(cal_iou(np.squeeze(Y_val[i]), np.squeeze(preds_val_t[i])))
print('Average Validate IOU: {}'.format(round(np.mean(iou),2)))

# Grid of (image, ground-truth mask, prediction) triplets with per-image IoU
# in each title: x columns by y rows of triplets.
#plt.figure(figsize=(20,10.5))
plt.figure(figsize=(20,16))
x, y = 16,3
for i in range(y):
    for j in range(x):
        # validation image
        plt.subplot(y*3, x, i*3*x+j+1)
        pos = i*x+j
        plt.imshow(np.squeeze(X_val[pos]))
        plt.title('Image #{}\nIOU {}'.format(pos,round(cal_iou(np.squeeze(Y_val[pos]), np.squeeze(preds_val_t[pos])),2)))
        plt.axis('off')
        # ground-truth mask
        plt.subplot(y*3, x, (i*3+1)*x+j+1)
        plt.imshow(np.squeeze(Y_val[pos]))
        plt.title('Mask')
        plt.axis('off')
        # thresholded prediction
        plt.subplot(y*3, x, (i*3+2)*x+j+1)
        plt.imshow(np.squeeze(preds_val_t[pos]))
        plt.title('Predict')
        plt.axis('off')
plt.show(False)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.