text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## discrete_puzzl.py
##
## R.Hanai 2011.15. -
##
from numpy import *
import operator
import time
# Unit vectors along +/-x, +/-y, +/-z; used to enumerate piece orientations.
exs = [array([1,0,0]),array([-1,0,0])]
eys = [array([0,1,0]),array([0,-1,0])]
ezs = [array([0,0,1]),array([0,0,-1])]
# Piece origins: 3^3 = 27 candidate grid positions.
def make_poss():
    """Return all 27 lattice origins inside the 3x3x3 cube, in x,y,z order."""
    return [array([i, j, k])
            for i in range(3)
            for j in range(3)
            for k in range(3)]
# Piece orientations: 6 choices for the x-axis image * 4 for the y-axis = 24.
def make_oris():
    """Return the 24 [ex', ey'] axis pairs describing cube rotations."""
    axes = (exs, eys, ezs)
    oris = []
    for i, primary in enumerate(axes):
        # The rotated y-axis is drawn from the two remaining coordinate axes.
        secondary = axes[(i + 1) % 3] + axes[(i + 2) % 3]
        for ex_prime in primary:
            for ey_prime in secondary:
                oris.append([ex_prime, ey_prime])
    return oris
# Precompute the candidate origins (27) and orientations (24) once at import.
poss = make_poss()
oris = make_oris()
class PhPiece:
    """A puzzle piece: an integer id, a color name and a list of unit cells."""

    def __init__(self, no, col, shape):
        # no: integer piece id (> 0; used to mark occupied cells in the board)
        # col: color name string, for display only
        # shape: list of [x, y, z] integer cells describing the piece
        self.no = no
        self.col = col
        self.shape = shape
        # All distinct placements of this piece inside the 3x3x3 cube.
        self.qs = self.valid_piece_configs()

    # Compute every feasible placement of the piece offline.
    # Out of the 27*6*4 candidates, drop those leaving the 3x3x3 box and
    # duplicates arising from the piece's own symmetry.
    def valid_piece_configs(self):
        qs = []
        ps = []
        fs = []
        for pos in poss:
            for ori in oris:
                shp = self.put(pos, ori)
                if piece_inside(shp):
                    # Occupancy grid used to detect duplicate placements.
                    cube = zeros([3,3,3]).tolist()
                    for i,j,k in shp:
                        cube[i][j][k] = 1
                    if not (cube in ps):
                        qs.append(shp)
                        ps.append(cube)
                        fs.append((pos,ori))
        # NOTE(review): Python 2 zip() returns a list; under Python 3 this
        # would need list(zip(...)) to remain indexable (solve() indexes qs).
        return zip(qs,ps,fs)

    def put(self, pos, ori):
        # Rotate the cells by the frame (ex, ey, ex x ey), then translate.
        ex, ey = ori
        ez = cross(ex, ey)
        T = transpose(array([ex,ey,ez]))
        return [dot(T, p) + pos for p in self.shape]

    def __repr__(self):
        return '<%d %s>'%(self.no, self.col)

    def __str__(self):
        return self.__repr__()
def piece_inside(poss):
    """Return True if every cell in *poss* lies within the 3x3x3 cube.

    poss: iterable of (x, y, z) coordinate triples (arrays or sequences).
    """
    # all() with a generator replaces the old reduce(operator.__and__, map(...))
    # chain: it short-circuits, reads clearly, and works on Python 3, where
    # reduce is no longer a builtin.
    return all(0 <= c <= 2 for pos in poss for c in pos)
def put(q, p, f, state, piece):
    """Try to place *piece* on the cells *q*; return the new board or None.

    p and f are accepted for interface compatibility; only q, state and
    piece.no are actually used.  A cell already holding a positive id
    blocks the placement.
    """
    board = copy(state)
    for x, y, z in q:
        if state[x, y, z] > 0:
            # Collision with an already-placed piece: placement fails.
            return None
        board[x, y, z] = piece.no
    # pruning heuristics
    return board
# Search results: count of solutions found and the solutions themselves
# (lists of (config-index, piece) pairs); mutated by solve() and run().
nsols = 0
sols = []
def print_solution(nsols, sol):
    # Print one solution: for each (config-index, piece) pair show the piece
    # and the (pos, ori) it was placed with (qs[i][2] is the f = (pos, ori)
    # entry).  Python 2 print syntax.
    print 'solution: %d '%nsols
    for i,p in sol:
        print ' ', p,
        print p.qs[i][2]
class SolutionsFound(Exception):
    # Raised by solve() to unwind the recursion once the requested number
    # of solutions has been found; caught in run().
    def __init__(self):
        pass
def solve(pieces, state, parsol=[], maxsolutions=10, debug=False):
    # Depth-first search: try every feasible configuration of pieces[0],
    # then recurse on the remaining pieces.  Updates the module-level
    # nsols/sols and raises SolutionsFound once maxsolutions are printed.
    # NOTE(review): parsol=[] is a mutable default argument; callers here
    # always pass parsol explicitly, so it is harmless in practice.
    # NOTE(review): sols.append(parsol) stores the *live* list, which is
    # pop()ed on backtrack — stored entries likely end up empty; consider
    # sols.append(list(parsol)).  print_solution() runs before the pops,
    # so the printed output is unaffected.
    global nsols, sols
    if pieces == []:
        nsols += 1
        sols.append(parsol)
        print_solution(nsols, parsol)
        if nsols >= maxsolutions:
            raise SolutionsFound()
        return None
    piece = pieces[0]
    for i,(q,p,f) in enumerate(piece.qs):
        newstate = put(q, p, f, state, piece)
        if not newstate == None:
            if debug:
                print 'put: ', piece
                print newstate
                raw_input()
            parsol.append((i,piece))
            solve(pieces[1:], newstate, parsol, maxsolutions=maxsolutions, debug=debug)
            parsol.pop()
# Definition of the seven pieces: id, display color, and unit-cell coordinates.
pieces = [PhPiece(1, 'brown', [[0,0,0],[0,1,0],[1,0,0],[1,1,0]]),
          PhPiece(2, 'aqua', [[0,0,0],[0,1,0],[0,2,0],[1,1,0]]),
          PhPiece(3, 'yellow', [[0,0,0],[0,1,0],[0,2,0],[1,0,0]]),
          PhPiece(4, 'red', [[0,0,0],[1,0,0],[1,1,0],[2,1,0]]),
          PhPiece(5, 'green', [[0,0,0],[0,1,0],[1,0,0],[0,0,1]]),
          PhPiece(6, 'purple', [[0,0,0],[1,0,0],[1,1,0],[0,0,1]]),
          PhPiece(7, 'yellow-green', [[0,0,0],[0,1,0],[1,0,0]])]
def run(n=10, debug=False):
global nsols, sols
nsols = 0
sols = []
t1 = time.time()
try:
solve(pieces, zeros([3,3,3]), parsol=[], maxsolutions=n, debug=debug)
except:
pass
t2 = time.time()
print (t2-t1),
print ' [secs]'
|
{"hexsha": "56a35e62851060948804ab59af13e2eb6e29c9f9", "size": 4582, "ext": "py", "lang": "Python", "max_stars_repo_path": "iv_scenario/src/discrete_puzzle.py", "max_stars_repo_name": "ryhanai/iv-plan-hironx", "max_stars_repo_head_hexsha": "2f89293a55df4608cb35e6a9676db97b9e486e7d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "iv_scenario/src/discrete_puzzle.py", "max_issues_repo_name": "ryhanai/iv-plan-hironx", "max_issues_repo_head_hexsha": "2f89293a55df4608cb35e6a9676db97b9e486e7d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "iv_scenario/src/discrete_puzzle.py", "max_forks_repo_name": "ryhanai/iv-plan-hironx", "max_forks_repo_head_hexsha": "2f89293a55df4608cb35e6a9676db97b9e486e7d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1104294479, "max_line_length": 87, "alphanum_fraction": 0.5002182453, "include": true, "reason": "from numpy", "num_tokens": 1592}
|
[STATEMENT]
lemma rel_gpv_lift_spmf2: "rel_gpv A B gpv (lift_spmf q) \<longleftrightarrow> (\<exists>p. gpv = lift_spmf p \<and> rel_spmf A p q)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rel_gpv A B gpv (lift_spmf q) = (\<exists>p. gpv = lift_spmf p \<and> rel_spmf A p q)
[PROOF STEP]
by(subst gpv.rel_flip[symmetric])(simp add: rel_gpv_lift_spmf1 pmf.rel_flip option.rel_conversep)
|
{"llama_tokens": 199, "file": "CryptHOL_Generative_Probabilistic_Value", "length": 1}
|
"""Utility functions for dealing with vertical coordinates."""
import logging
import numpy as np
import xarray as xr
from .._constants import GRAV_EARTH
from ..var import Var
from .. import internal_names
def to_radians(arr, is_delta=False):
    """Force data with units either degrees or radians to be radians."""

    def _deg2rad(data):
        # Log the conversion so silent unit changes can be traced.
        logging.debug("Conversion applied: degrees -> radians to array: "
                      "{}".format(data))
        return np.deg2rad(data)

    # Trust embedded units metadata when the array carries it.
    try:
        units = arr.units
    except AttributeError:
        pass
    else:
        if units.lower().startswith('degrees'):
            return _deg2rad(arr)
    # No usable metadata: assume degrees when the magnitudes are too large
    # to plausibly be radians (tighter cutoff for increments/deltas).
    threshold = 0.1 * np.pi if is_delta else 4 * np.pi
    if np.max(np.abs(arr)) > threshold:
        return _deg2rad(arr)
    return arr
def to_pascal(arr, is_dp=False):
    """Force data with units either hPa or Pa to be in Pa."""
    # Values below the cutoff are implausibly small for Pa and are taken to
    # be hPa; pressure thicknesses (is_dp) use a lower cutoff.
    cutoff = 400 if is_dp else 1200
    if np.max(np.abs(arr)) >= cutoff:
        return arr
    logging.debug("Conversion applied: hPa -> Pa to array: {}".format(arr))
    return arr * 100.
def to_hpa(arr):
    """Convert pressure array from Pa to hPa (if needed)."""
    if np.max(np.abs(arr)) <= 1200.:
        # Already plausibly in hPa; leave untouched.
        return arr
    logging.debug("Conversion applied: Pa -> hPa to array: {}".format(arr))
    return arr / 100.
def phalf_from_ps(bk, pk, ps):
    """Compute pressure of half levels of hybrid sigma-pressure coordinates.

    The hybrid definition is p_half = pk + bk * ps.
    """
    sigma_part = ps * bk
    return sigma_part + pk
def replace_coord(arr, old_dim, new_dim, new_coord):
    """Replace a coordinate with new one; new and old must have same shape."""
    # Rename the dimension first, then overwrite its coordinate values.
    renamed = arr.rename({old_dim: new_dim})
    renamed[new_dim] = new_coord
    return renamed
def to_pfull_from_phalf(arr, pfull_coord):
    """Compute data at full pressure levels from values at half levels."""

    def _on_pfull(half_slice):
        # Select a shifted copy along the half-level dim and relabel it
        # onto the full-level coordinate so the two copies can be averaged.
        part = arr.isel(**{internal_names.PHALF_STR: half_slice})
        return replace_coord(part, internal_names.PHALF_STR,
                             internal_names.PFULL_STR, pfull_coord)

    upper = _on_pfull(slice(1, None))
    lower = _on_pfull(slice(None, -1))
    return 0.5 * (lower + upper)
def to_phalf_from_pfull(arr, val_toa=0, val_sfc=0):
    """Compute data at half pressure levels from values at full levels.

    Could be the pressure array itself, but it could also be any other data
    defined at pressure levels.  Requires specification of values at surface
    and top of atmosphere.
    """
    n_lev = arr.shape[0]
    phalf = np.zeros((n_lev + 1, arr.shape[1], arr.shape[2]))
    # Interior half levels are midpoints of adjacent full levels; the two
    # boundary values are supplied by the caller.
    phalf[1:-1] = 0.5 * (arr[:-1] + arr[1:])
    phalf[0] = val_toa
    phalf[-1] = val_sfc
    return phalf
def pfull_from_ps(bk, pk, ps, pfull_coord):
    """Compute pressure at full levels from surface pressure."""
    # Build the half-level pressures, then average onto full levels.
    half_lev_p = phalf_from_ps(bk, pk, ps)
    return to_pfull_from_phalf(half_lev_p, pfull_coord)
def d_deta_from_phalf(arr, pfull_coord):
    """Compute pressure level thickness from half level pressures."""
    # Difference along the half-level dimension, then relabel the result
    # onto the full-level coordinate.
    thickness = arr.diff(dim=internal_names.PHALF_STR, n=1)
    return replace_coord(thickness, internal_names.PHALF_STR,
                         internal_names.PFULL_STR, pfull_coord)
def d_deta_from_pfull(arr):
    r"""Compute $\partial/\partial\eta$ of the array on full hybrid levels.

    $\eta$ is the model vertical coordinate, and its value is assumed to
    simply increment by 1 from 0 at the surface upwards.  The data to be
    differenced is assumed to be defined at full pressure levels.

    Parameters
    ----------
    arr : xarray.DataArray containing the 'pfull' dim

    Returns
    -------
    deriv : xarray.DataArray with the derivative along 'pfull' computed via
        2nd order centered differencing.
    """
    # Interior points: centered difference.  The eta spacing is 1, so the
    # denominator is simply 2.
    right = arr[{internal_names.PFULL_STR: slice(2, None, None)}].values
    left = arr[{internal_names.PFULL_STR: slice(0, -2, 1)}].values
    deriv = xr.DataArray(np.zeros(arr.shape), dims=arr.dims,
                         coords=arr.coords)
    deriv[{internal_names.PFULL_STR: slice(1, -1, 1)}] = (right - left) / 2.
    # Endpoints: one-sided first-order differences.
    deriv[{internal_names.PFULL_STR: 0}] = (
        arr[{internal_names.PFULL_STR: 1}].values -
        arr[{internal_names.PFULL_STR: 0}].values)
    deriv[{internal_names.PFULL_STR: -1}] = (
        arr[{internal_names.PFULL_STR: -1}].values -
        arr[{internal_names.PFULL_STR: -2}].values)
    return deriv
def dp_from_ps(bk, pk, ps, pfull_coord):
    """Compute pressure level thickness from surface pressure"""
    # Thickness is the difference between adjacent half-level pressures.
    half_lev_p = phalf_from_ps(bk, pk, ps)
    return d_deta_from_phalf(half_lev_p, pfull_coord)
def integrate(arr, ddim, dim=False, is_pressure=False):
    """Integrate along the given dimension."""
    # For pressure coordinates, infer the vertical dimension name from the
    # differential element itself.
    if is_pressure:
        dim = vert_coord_name(ddim)
    weighted = arr * ddim
    return weighted.sum(dim=dim)
def get_dim_name(arr, names):
    """Determine if an object has an attribute name matching a given list."""
    # TODO: raise warning/exception when multiple names arr attrs.
    matches = (name for name in names if hasattr(arr, name))
    found = next(matches, None)
    if found is None:
        raise AttributeError("No attributes of the object `{0}` match the "
                             "specified names of `{1}`".format(arr, names))
    return found
def vert_coord_name(arr):
    # Name of the array's vertical coordinate: interpolated pressure levels
    # if present, otherwise native model full levels.
    return get_dim_name(arr, [internal_names.PLEVEL_STR,
                              internal_names.PFULL_STR])
def int_dp_g(arr, dp):
    """Mass weighted integral."""
    # Vertical integral of arr weighted by dp/g, i.e. integrating over the
    # mass per unit area of each layer; dp is coerced to Pa first.
    return integrate(arr, to_pascal(dp, is_dp=True),
                     vert_coord_name(dp)) / GRAV_EARTH
def dp_from_p(p, ps, p_top=0., p_bot=1.1e5):
    """Get level thickness of pressure data, incorporating surface pressure.

    Level edges are defined as halfway between the levels, as well as the
    user-specified uppermost and lowermost values.  The dp of levels whose
    bottom pressure is less than the surface pressure is not changed by ps,
    since they don't intersect the surface.  If ps is in between a level's
    top and bottom pressures, then its dp becomes the pressure difference
    between its top and ps.  If ps is less than a level's top and bottom
    pressures, then that level is underground and its values are masked.

    Note that postprocessing routines (e.g. at GFDL) typically mask out data
    wherever the surface pressure is less than the level's given value, not
    the level's upper edge.  This masks out more levels than the edge-based
    criterion used here.

    Parameters
    ----------
    p, ps : xarray.DataArray
        Pressure at level centers and surface pressure, respectively.
    p_top, p_bot : float
        Pressure, in Pa, of the uppermost and lowermost level edges.
    """
    p_str = get_dim_name(p, (internal_names.PLEVEL_STR, 'plev'))
    p_vals = to_pascal(p.values.copy())
    # Layer edges are halfway between the given pressure levels.
    p_edges_interior = 0.5*(p_vals[:-1] + p_vals[1:])
    p_edges = np.concatenate(([p_bot], p_edges_interior, [p_top]))
    p_edge_above = p_edges[1:]
    p_edge_below = p_edges[:-1]
    dp = p_edge_below - p_edge_above
    # BUGFIX: previously `if not all(np.sign(dp))`, which only triggers on a
    # zero difference: np.sign yields -1 for negative values and -1 is
    # truthy, so inverted (negative-thickness) grids slipped past the check
    # that the error message promises.
    if not np.all(dp > 0):
        raise ValueError("dp array not all > 0 : {}".format(dp))
    # Pressure difference between ps and the upper edge of each pressure level.
    p_edge_above_xr = xr.DataArray(p_edge_above, dims=p.dims, coords=p.coords)
    dp_to_sfc = ps - p_edge_above_xr
    # Find the level adjacent to the masked, under-ground levels.
    change = xr.DataArray(np.zeros(dp_to_sfc.shape), dims=dp_to_sfc.dims,
                          coords=dp_to_sfc.coords)
    change[{p_str: slice(1, None)}] = np.diff(
        np.sign(ps - to_pascal(p.copy()))
    )
    dp_combined = xr.DataArray(np.where(change, dp_to_sfc, dp),
                               dims=dp_to_sfc.dims, coords=dp_to_sfc.coords)
    # Mask levels that are under ground.
    above_ground = ps > to_pascal(p.copy())
    above_ground[p_str] = p[p_str]
    dp_with_ps = dp_combined.where(above_ground)
    # Revert to original dim order.
    possible_dim_orders = [
        (internal_names.TIME_STR, p_str, internal_names.LAT_STR,
         internal_names.LON_STR),
        (internal_names.TIME_STR, p_str, internal_names.LAT_STR),
        (internal_names.TIME_STR, p_str, internal_names.LON_STR),
        (internal_names.TIME_STR, p_str),
        (p_str, internal_names.LAT_STR, internal_names.LON_STR),
        (p_str, internal_names.LAT_STR),
        (p_str, internal_names.LON_STR),
        (p_str,),
    ]
    for dim_order in possible_dim_orders:
        try:
            return dp_with_ps.transpose(*dim_order)
        except ValueError:
            logging.debug("Failed transpose to dims: {}".format(dim_order))
    else:
        # Runs only if every transpose failed (returns exit the loop early).
        logging.debug("No transpose was successful.")
    return dp_with_ps
def level_thickness(p, p_top=0., p_bot=1.01325e5):
    """Calculate the thickness, in Pa, of each pressure level.

    Assumes that the pressure values given are at the center of that model
    level, except for the lowest value (typically 1000 hPa), which is the
    bottom boundary.  The uppermost level extends to 0 hPa.

    Unlike `dp_from_p`, this does not incorporate the surface pressure.
    """
    lev_pa = to_pascal(p.values.copy())
    thickness = np.empty_like(lev_pa)
    # Bottom level: from p_bot up to the midpoint of the lowest two levels.
    thickness[0] = p_bot - 0.5*(lev_pa[0] + lev_pa[1])
    # Interior levels: between the midpoints on either side.
    thickness[1:-1] = 0.5*(lev_pa[0:-2] - lev_pa[2:])
    # Top level: from the midpoint of the top two levels up to p_top.
    thickness[-1] = 0.5*(lev_pa[-2] + lev_pa[-1]) - p_top
    out = p.copy()
    out.values = thickness
    return out
def does_coord_increase_w_index(arr):
    """Determine if the array values increase with the index.

    Useful, e.g., for pressure, which sometimes is indexed surface to TOA and
    sometimes the opposite.

    Raises
    ------
    ValueError
        If the array is not strictly monotonic.
    """
    diff = np.diff(arr)
    # BUGFIX: strict monotonicity requires all differences to share one
    # nonzero sign.  The old check (`np.all(np.abs(np.sign(diff)))`) only
    # rejected repeated values, so e.g. [1, 2, 1] passed.
    if not (np.all(diff > 0) or np.all(diff < 0)):
        raise ValueError("Array is not monotonic: {}".format(arr))
    # Monotonic, so the first difference determines the direction.
    # BUGFIX: was `bool(diff[0])`, which is True for ANY nonzero first
    # difference — a decreasing array was reported as increasing.
    return bool(diff[0] > 0)
# aospy Var objects for the grid and pressure variables consumed/produced by
# the vertical-coordinate computations above.

# Hybrid sigma-pressure "b" coefficients (vertical-only; see phalf_from_ps).
bk = Var(
    name=internal_names.BK_STR,
    alt_names=internal_names.GRID_ATTRS[internal_names.BK_STR],
    def_vert=True,
    def_time=False,
    def_lon=False,
    def_lat=False
)
# Hybrid sigma-pressure "p" coefficients (vertical-only; see phalf_from_ps).
pk = Var(
    name=internal_names.PK_STR,
    alt_names=internal_names.GRID_ATTRS[internal_names.PK_STR],
    def_vert=True,
    def_time=False,
    def_lon=False,
    def_lat=False
)
# Full-level coordinate of the hybrid vertical grid.
pfull_coord = Var(
    name=internal_names.PFULL_STR,
    alt_names=internal_names.GRID_ATTRS[internal_names.PFULL_STR],
    def_vert=True,
    def_time=False,
    def_lon=False,
    def_lat=False
)
# Surface pressure: time/lat/lon, no vertical dimension.
ps = Var(
    name='ps',
    domain='atmos',
    description='Surface pressure',
    units='Pa',
    def_vert=False,
    def_time=True,
    def_lon=True,
    def_lat=True
)
# Pressure on interpolated (fixed) pressure levels.
p_level = Var(
    name='p',
    alt_names=internal_names.GRID_ATTRS[internal_names.PLEVEL_STR],
    domain='atmos',
    description='Pressure on interpolated levels',
    units='Pa',
    def_vert=True,
    def_time=False,
    def_lon=False,
    def_lat=False
)
# Level thickness on interpolated levels, computed via dp_from_p.
dp_level = Var(
    name='dp',
    description='Pressure thickness of model levels',
    units='Pa',
    def_vert=True,
    def_time=True,
    def_lon=True,
    def_lat=True,
    func=dp_from_p,
    variables=(p_level, ps)
)
# Pressure at native model level midpoints, computed via pfull_from_ps.
p_eta = Var(
    name='p',
    description='Pressure at model-native level midpoints',
    units='Pa',
    def_vert=True,
    def_time=True,
    def_lon=True,
    def_lat=True,
    func=pfull_from_ps,
    variables=(bk, pk, ps, pfull_coord)
)
# Level thickness on native model levels, computed via dp_from_ps.
dp_eta = Var(
    name='dp',
    description='Pressure thickness of model levels',
    units='Pa',
    def_vert=True,
    def_time=True,
    def_lon=True,
    def_lat=True,
    func=dp_from_ps,
    variables=(bk, pk, ps, pfull_coord)
)
|
{"hexsha": "bb5ea66bd4dcc35093df898e25638f482539db1a", "size": 11880, "ext": "py", "lang": "Python", "max_stars_repo_path": "aospy/utils/vertcoord.py", "max_stars_repo_name": "spencerahill/aospy", "max_stars_repo_head_hexsha": "6c8df45705927476e140df903bcb88e5abadae22", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 82, "max_stars_repo_stars_event_min_datetime": "2015-10-25T17:19:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-31T19:09:18.000Z", "max_issues_repo_path": "aospy/utils/vertcoord.py", "max_issues_repo_name": "spencerahill/aospy", "max_issues_repo_head_hexsha": "6c8df45705927476e140df903bcb88e5abadae22", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 317, "max_issues_repo_issues_event_min_datetime": "2015-01-12T19:01:30.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-16T16:54:08.000Z", "max_forks_repo_path": "aospy/utils/vertcoord.py", "max_forks_repo_name": "spencerahill/aospy", "max_forks_repo_head_hexsha": "6c8df45705927476e140df903bcb88e5abadae22", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2015-06-11T16:06:25.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-01T13:47:15.000Z", "avg_line_length": 33.0, "max_line_length": 79, "alphanum_fraction": 0.6614478114, "include": true, "reason": "import numpy", "num_tokens": 3056}
|
import tempfile
import pytest
from hypothesis import given
import astropy.units as u
import sunpy.net.dataretriever.sources.goes as goes
from sunpy.net import Fido
from sunpy.net import attrs as a
from sunpy.net.dataretriever.client import QueryResponse
from sunpy.net.tests.strategies import time_attr
from sunpy.time import TimeRange, parse_time
@pytest.fixture
def suvi_client():
    """Fixture providing a fresh SUVIClient instance for each test."""
    return goes.SUVIClient()
@given(time_attr())
def test_can_handle_query(time):
    # Don't use the fixture, as hypothesis complains
    client = goes.SUVIClient()
    # Queries the client should accept.
    assert client._can_handle_query(time, a.Instrument.suvi) is True
    assert client._can_handle_query(time, a.Instrument.suvi,
                                    a.Wavelength(131 * u.Angstrom)) is True
    assert client._can_handle_query(time, a.Instrument.suvi,
                                    a.Wavelength(131 * u.Angstrom),
                                    a.Level.two) is True
    assert client._can_handle_query(time, a.Instrument.suvi,
                                    a.goes.SatelliteNumber(16)) is True
    # Queries it should reject: no instrument, or the wrong instrument.
    assert client._can_handle_query(time) is False
    assert client._can_handle_query(time, a.Instrument.aia) is False
def test_get_goes_sat_num(suvi_client):
    # SUVI first flew on GOES-16, so the satellite number is at least 16.
    date = parse_time('2019/06/11 00:00')
    sat_num = suvi_client._get_goes_sat_num(date)
    assert sat_num >= 16
    assert type(sat_num) is int
def test_get_goes_sat_num_error(suvi_client):
    # A date before any SUVI-carrying satellite existed should error out.
    too_early = parse_time('1800/06/11 00:00')
    with pytest.raises(ValueError):
        suvi_client._get_goes_sat_num(too_early)
def test_get_url_for_timerange_errors(suvi_client):
    """Check that unsupported values raise errors."""
    tr = TimeRange('2019/06/11 00:00', '2019/06/11 00:10')
    # Unsupported level, wavelength, and satellite number respectively.
    bad_kwargs = ({'level': 0},
                  {'wavelength': 100 * u.Angstrom},
                  {'satellitenumber': 1})
    for kwargs in bad_kwargs:
        with pytest.raises(ValueError):
            suvi_client._get_url_for_timerange(tr, **kwargs)
def mock_query_object(suvi_client):
    """
    Creating a Query Response object and prefilling it with some information
    """
    # Fixed query window and wavelength for the canned response.
    t_start = '2019/05/25 00:50'
    t_end = '2019/05/25 00:52'
    wavelength = 94 * u.Angstrom
    info = {
        'TimeRange': TimeRange(parse_time(t_start), parse_time(t_end)),
        'Time_start': parse_time(t_start),
        'Time_end': parse_time(t_end),
        'source': 'GOES',
        'instrument': 'SUVI',
        'physobs': 'flux',
        'provider': 'NOAA',
        'wavelength': wavelength
    }
    url_list = ['https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites'
                '/goes/goes16/l2/data/suvi-l2-ci094/2019/05/25/'
                'dr_suvi-l2-ci094_g16_s20190525T005200Z_e20190525T005600Z_v1-0-0.fits']
    return QueryResponse.create(info, url_list, client=suvi_client)
def test_attr_reg():
    # Ensure the attrs used throughout these tests are present in the attr
    # namespace (normally populated by client registration).
    a.Instrument.suvi = a.Instrument("SUVI")
    a.goes.SatelliteNumber.A16 = a.goes.SatelliteNumber("16")
@pytest.mark.remote_data
def test_fetch_working(suvi_client):
    """
    Tests if the online server for goes_suvi is working.

    This also checks if the mock is working well.
    """
    start = '2019/05/25 00:50'
    end = '2019/05/25 00:52'
    wave = 94 * u.Angstrom
    qr1 = suvi_client.search(a.Time(start, end), a.Instrument.suvi, a.Wavelength(wave))
    # Mock QueryResponse object
    mock_qr = mock_query_object(suvi_client)
    # Compare if two objects have the same attribute
    mock_qr = mock_qr.blocks[0]
    qr = qr1.blocks[0]
    assert mock_qr.source == qr.source
    assert mock_qr.provider == qr.provider
    assert mock_qr.physobs == qr.physobs
    assert mock_qr.instrument == qr.instrument
    assert mock_qr.url == qr.url
    assert qr1.time_range() == TimeRange("2019-05-25T00:52:00.000",
                                         "2019-05-25T00:56:00.000")
    # Download into a throwaway directory; expect one file per record.
    with tempfile.TemporaryDirectory() as tmpdirname:
        download_list = suvi_client.fetch(qr1, path=tmpdirname)
        assert len(download_list) == len(qr1)
@pytest.mark.remote_data
@pytest.mark.parametrize("start, end, wave, expected_num_files",
                         [('2019/05/25 00:50', '2019/05/25 00:52', 94, 1),
                          ('2019/05/25 00:50', '2019/05/25 00:52', 131, 1),
                          ('2019/05/25 00:50', '2019/05/25 00:52', 171, 1),
                          ('2019/05/25 00:50', '2019/05/25 00:52', 195, 1),
                          ('2019/05/25 00:50', '2019/05/25 00:52', 284, 1),
                          ('2019/05/25 00:50', '2019/05/25 00:52', 304, 1)]
                         )
def test_get_url_for_time_range_level2(suvi_client, start, end, wave, expected_num_files):
    """Check level-2 URL retrieval: one file per wavelength in this window."""
    urls = suvi_client._get_url_for_timerange(TimeRange(start, end),
                                              wavelength=wave * u.Angstrom,
                                              level=2)
    assert isinstance(urls, list)
    assert len(urls) == expected_num_files
@pytest.mark.remote_data
@pytest.mark.parametrize("start, end, expected_num_files",
                         [('2019/05/25 00:50', '2019/05/25 00:52', 6)]
                         )
def test_get_url_for_time_range_level2_allwave(suvi_client, start, end, expected_num_files):
    """check that we get all wavelengths if no wavelength is given"""
    urls = suvi_client._get_url_for_timerange(TimeRange(start, end), level=2)
    assert isinstance(urls, list)
    assert len(urls) == expected_num_files
@pytest.mark.remote_data
@pytest.mark.parametrize("start, end ,wave, expected_num_files",
                         [('2019/05/25 00:50', '2019/05/25 00:54', 94, 6),
                          ('2019/05/25 00:50', '2019/05/25 00:54', 131, 3),
                          ('2019/05/25 00:50', '2019/05/25 00:54', 171, 2),
                          ('2019/05/25 00:50', '2019/05/25 00:54', 195, 7),
                          ('2019/05/25 00:50', '2019/05/25 00:54', 284, 2),
                          ('2019/05/25 00:50', '2019/05/25 00:54', 304, 4)]
                         )
def test_get_url_for_time_range_level1b(suvi_client, start, end, wave, expected_num_files):
    """Check level-1b URL retrieval for each individual wavelength."""
    urls = suvi_client._get_url_for_timerange(TimeRange(start, end),
                                              wavelength=wave * u.Angstrom,
                                              level='1b')
    assert isinstance(urls, list)
    assert len(urls) == expected_num_files
@pytest.mark.remote_data
@pytest.mark.parametrize("start, end ,wave, expected_num_files",
                         [('2019/05/25 00:50', '2019/05/25 00:54', 94, 6),
                          ('2019/05/25 00:50', '2019/05/25 00:54', 131, 3),
                          ('2019/05/25 00:50', '2019/05/25 00:54', 171, 2),
                          ('2019/05/25 00:50', '2019/05/25 00:54', 195, 7),
                          ('2019/05/25 00:50', '2019/05/25 00:54', 284, 2),
                          ('2019/05/25 00:50', '2019/05/25 00:54', 304, 4)]
                         )
def test_fido_onewave_level1b(start, end, wave, expected_num_files):
    """Check a single-wavelength level-1b query through the Fido interface."""
    result = Fido.search(a.Time(start, end), a.Instrument.suvi,
                         a.Wavelength(wave * u.Angstrom), a.Level('1b'))
    assert result.file_num == expected_num_files
@pytest.mark.remote_data
@pytest.mark.parametrize("start, end, wave1, wave2, expected_num_files",
                         [('2019/05/25 00:50', '2019/05/25 00:54', 1, 100, 6),
                          ('2019/05/25 00:50', '2019/05/25 00:54', 1, 150, 9),
                          ('2019/05/25 00:50', '2019/05/25 00:54', 1, 180, 11),
                          ('2019/05/25 00:50', '2019/05/25 00:54', 1, 200, 18),
                          ('2019/05/25 00:50', '2019/05/25 00:54', 1, 300, 20),
                          ('2019/05/25 00:50', '2019/05/25 00:54', 1, 310, 24)]
                         )
def test_fido_waverange_level1b(start, end, wave1, wave2, expected_num_files):
    """Check that a wavelength-range query returns files for every
    wavelength inside the range."""
    result = Fido.search(a.Time(start, end), a.Instrument.suvi,
                         a.Wavelength(wave1 * u.Angstrom, wave2 * u.Angstrom),
                         a.Level('1b'))
    assert result.file_num == expected_num_files
@pytest.mark.remote_data
@pytest.mark.parametrize("start, end, expected_num_files",
                         [('2019/05/25 00:50', '2019/05/25 00:52', 6)]
                         )
def test_query(suvi_client, start, end, expected_num_files):
    """A plain instrument query returns files for all six wavelengths."""
    qr1 = suvi_client.search(a.Time(start, end), a.Instrument.suvi)
    assert isinstance(qr1, QueryResponse)
    assert len(qr1) == expected_num_files
    assert qr1.time_range().start == parse_time('2019/05/25 00:52')
    assert qr1.time_range().end == parse_time('2019/05/25 00:56')
def test_show(suvi_client):
    # show() with no arguments returns every column; with names, a subset.
    qr = mock_query_object(suvi_client)
    full = qr.show()
    subset = qr.show('Start Time', 'Instrument')
    assert full.colnames == ['Start Time', 'End Time', 'Source',
                             'Instrument', 'Wavelength']
    assert subset.colnames == ['Start Time', 'Instrument']
    assert full['Instrument'][0] == 'SUVI'
|
{"hexsha": "82dbd6d4c69f752462e04e4c69edd1bf4e97e3de", "size": 9516, "ext": "py", "lang": "Python", "max_stars_repo_path": "sunpy/net/dataretriever/sources/tests/test_goes_suvi.py", "max_stars_repo_name": "jmason86/sunpy", "max_stars_repo_head_hexsha": "d3339ca999d79c53ac984f4d3215b6dbefbeb734", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sunpy/net/dataretriever/sources/tests/test_goes_suvi.py", "max_issues_repo_name": "jmason86/sunpy", "max_issues_repo_head_hexsha": "d3339ca999d79c53ac984f4d3215b6dbefbeb734", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sunpy/net/dataretriever/sources/tests/test_goes_suvi.py", "max_forks_repo_name": "jmason86/sunpy", "max_forks_repo_head_hexsha": "d3339ca999d79c53ac984f4d3215b6dbefbeb734", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7368421053, "max_line_length": 92, "alphanum_fraction": 0.6030895334, "include": true, "reason": "import astropy", "num_tokens": 2746}
|
# Copyright (c) 2021. yoshida-lab. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import re
import numpy as np
from pymatgen.core import Element
from pymatgen.analysis.local_env import VoronoiNN
from xenonpy.descriptor.base import BaseDescriptor, BaseFeaturizer
__all__ = ['RadialDistributionFunction', 'OrbitalFieldMatrix', 'Structures']
class RadialDistributionFunction(BaseFeaturizer):
    """
    Calculate pair distribution descriptor for machine learning.
    """

    @property
    def feature_labels(self):
        # One label per radial bin, named by the bin's outer radius.
        return [str(d) for d in self._interval[1:]]

    def __init__(self, n_bins=201, r_max=20.0, *, n_jobs=-1, on_errors='raise', return_type='any', target_col=None):
        """
        Parameters
        ----------
        n_bins: int
            Number of radial grid points.
        r_max: float
            Maximum of radial grid (the minimum is always set zero).
        n_jobs: int
            The number of jobs to run in parallel for both fit and predict. Set -1 to use all cpu cores (default).
        on_errors: string
            How to handle exceptions in feature calculations. Can be 'nan', 'keep', 'raise'.
            When 'nan', return a column with ``np.nan``.
            The length of column corresponding to the number of feature labs.
            When 'keep', return a column with exception objects.
            The default is 'raise' which will raise up the exception.
        return_type: str
            Specific the return type.
            Can be ``any``, ``array`` and ``df``.
            ``array`` and ``df`` force return type to ``np.ndarray`` and ``pd.DataFrame`` respectively.
            If ``any``, the return type dependent on the input type.
            Default is ``any``
        target_col
            Only relevant when input is pd.DataFrame, otherwise ignored.
            Specify a single column to be used for transformation.
            If ``None``, all columns of the pd.DataFrame is used.
            Default is None.
        """
        super().__init__(n_jobs=n_jobs, on_errors=on_errors, return_type=return_type, target_col=target_col)
        # NOTE(review): the check allows n_bins == 1 although the message
        # says "greater than 1"; n_bins == 1 makes self.dr divide by zero
        # below — confirm the intended lower bound.
        assert n_bins >= 1, "n_bins should be greater than 1!"
        assert r_max > 0, "r_max should be greater than 0!"
        self.n_bins = n_bins
        self.r_max = r_max
        # Bin width chosen so the grid spans [0, r_max] inclusive.
        self.dr = r_max / (n_bins - 1)
        self._interval = np.arange(0.0, r_max + self.dr, self.dr)
        self.__authors__ = ['TsumiNa']

    def featurize(self, structure):
        """
        Get RDF of the input structure.

        Args:
            structure: Pymatgen Structure object.

        Returns:
            rdf, dist: (tuple of arrays) the first element is the
                normalized RDF, whereas the second element is
                the inner radius of the RDF bin.
        """
        if not structure.is_ordered:
            raise ValueError("Disordered structure support not built yet")
        # Get the distances between all atoms
        neighbors_lst = structure.get_all_neighbors(self.r_max)
        all_distances = np.concatenate(tuple(map(lambda x: [e[1] for e in x], neighbors_lst)))
        # Compute a histogram
        dist_hist, dist_bins = np.histogram(all_distances, bins=self._interval, density=False)
        # Normalize counts by shell volume and overall number density.
        shell_vol = 4.0 / 3.0 * np.pi * (np.power(dist_bins[1:], 3) - np.power(dist_bins[:-1], 3))
        number_density = structure.num_sites / structure.volume
        return dist_hist / shell_vol / number_density
class OrbitalFieldMatrix(BaseFeaturizer):
"""
Representation based on the valence shell electrons of neighboring atoms.
Each atom is described by a 32-element vector uniquely representing the
valence subshell. A 32x32 (39x39) matrix is formed by multiplying two
atomic vectors. An OFM for an atomic environment is the sum of these
matrices for each atom the center atom coordinates with multiplied by a
distance function (In this case, 1/r times the weight of the coordinating
atom in the Voronoi.
"""
    def __init__(self, including_d=True, *, n_jobs=-1, on_errors='raise', return_type='any', target_col=None):
        """
        Parameters
        ----------
        including_d: bool
            If true, add distance information.
        n_jobs: int
            The number of jobs to run in parallel for both fit and predict. Set -1 to use all cpu cores (default).
        on_errors: string
            How to handle exceptions in feature calculations. Can be 'nan', 'keep', 'raise'.
            When 'nan', return a column with ``np.nan``.
            The length of column corresponding to the number of feature labs.
            When 'keep', return a column with exception objects.
            The default is 'raise' which will raise up the exception.
        return_type: str
            Specific the return type.
            Can be ``any``, ``array`` and ``df``.
            ``array`` and ``df`` force return type to ``np.ndarray`` and ``pd.DataFrame`` respectively.
            If ``any``, the return type dependent on the input type.
            Default is ``any``
        target_col
            Only relevant when input is pd.DataFrame, otherwise ignored.
            Specify a single column to be used for transformation.
            If ``None``, all columns of the pd.DataFrame is used.
            Default is None.
        """
        super().__init__(n_jobs=n_jobs, on_errors=on_errors, return_type=return_type, target_col=target_col)
        self._including_d = including_d
        self.__authors__ = ['TsumiNa']
        # BibTeX entry for the OFM descriptor paper; kept as data on the
        # instance for citation reporting.
        self.__citations__ = [
            '''
            @article{LamPham2017,
            archivePrefix = {arXiv},
            arxivId = {1705.01043},
            author = {{Lam Pham}, Tien and Kino, Hiori and Terakura, Kiyoyuki and Miyake, Takashi and Tsuda, Koji and Takigawa, Ichigaku and {Chi Dam}, Hieu},
            doi = {10.1080/14686996.2017.1378060},
            eprint = {1705.01043},
            issn = {18785514},
            journal = {Science and Technology of Advanced Materials},
            keywords = {Material descriptor,data mining,machine learning,magnetic materials,material informatics},
            number = {1},
            pages = {756--765},
            pmid = {29152012},
            publisher = {Taylor {\&} Francis},
            title = {{Machine learning reveals orbital interaction in materials}},
            url = {https://doi.org/10.1080/14686996.2017.1378060},
            volume = {18},
            year = {2017}
            }
            '''
        ]
@staticmethod
def get_element_representation(name):
"""
generate one-hot representation for a element, e.g, si = [0.0, 1.0, 0.0, 0.0, ...]
Parameters
----------
name: string
element symbol
"""
element = Element(name)
general_element_electronic = {
's1': 0.0,
's2': 0.0,
'p1': 0.0,
'p2': 0.0,
'p3': 0.0,
'p4': 0.0,
'p5': 0.0,
'p6': 0.0,
'd1': 0.0,
'd2': 0.0,
'd3': 0.0,
'd4': 0.0,
'd5': 0.0,
'd6': 0.0,
'd7': 0.0,
'd8': 0.0,
'd9': 0.0,
'd10': 0.0,
'f1': 0.0,
'f2': 0.0,
'f3': 0.0,
'f4': 0.0,
'f5': 0.0,
'f6': 0.0,
'f7': 0.0,
'f8': 0.0,
'f9': 0.0,
'f10': 0.0,
'f11': 0.0,
'f12': 0.0,
'f13': 0.0,
'f14': 0.0
}
general_electron_subshells = [
's1', 's2', 'p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'd1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9', 'd10',
'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11', 'f12', 'f13', 'f14'
]
if name == 'H':
element_electronic_structure = ['s1']
elif name == 'He':
element_electronic_structure = ['s2']
else:
element_electronic_structure = [
''.join(pair) for pair in re.findall(r"\.\d(\w+)<sup>(\d+)</sup>", element.electronic_structure)
]
for eletron_subshell in element_electronic_structure:
general_element_electronic[eletron_subshell] = 1.0
return np.array([general_element_electronic[key] for key in general_electron_subshells])
def featurize(self, structure, is_including_d=True):
"""
Generate OFM descriptor
Parameters
----------
structure: pymatgen.Structure
The input structure for OFM calculation.
"""
atoms = np.array([site.species_string for site in structure])
coordinator_finder = VoronoiNN(cutoff=10.0)
local_orbital_field_matrices = []
for i_atom, atom in enumerate(atoms):
neighbors = coordinator_finder.get_nn_info(structure=structure, n=i_atom)
site = structure[i_atom]
center_vector = self.get_element_representation(atom)
env_vector = np.zeros(32)
for nn in neighbors:
site_x = nn['site']
w = nn['weight']
site_x_label = site_x.species_string
neigh_vector = self.get_element_representation(site_x_label)
d = np.sqrt(np.sum((site.coords - site_x.coords)**2))
if self._including_d:
env_vector += neigh_vector * w / d
else:
env_vector += neigh_vector * w
local_matrix = center_vector[None, :] * env_vector[:, None]
local_matrix = np.ravel(local_matrix)
local_orbital_field_matrices.append(local_matrix)
return np.array(local_orbital_field_matrices).mean(axis=0)
@property
def feature_labels(self):
labels = np.array([
's1', 's2', 'p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'd1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9', 'd10',
'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11', 'f12', 'f13', 'f14'
])
return [i + '_' + j for i in labels for j in labels]
class Structures(BaseDescriptor):
    """
    Calculate structure descriptors from compound's structure.

    Bundles a radial-distribution-function featurizer and an
    orbital-field-matrix featurizer for structure input.
    """
    def __init__(self,
                 n_bins=201,
                 r_max=20.0,
                 including_d=True,
                 *,
                 n_jobs=-1,
                 featurizers='all',
                 on_errors='raise',
                 target_col=None):
        """
        Parameters
        ----------
        n_bins: int
            Number of radial grid points.
        r_max: float
            Maximum of radial grid (the minimum is always set zero).
        including_d: bool
            If true, add distance information.
        n_jobs: int
            The number of jobs to run in parallel for both fit and predict. Set -1 to use all cpu cores (default).
        featurizers: list[str] or 'all'
            Featurizers that will be used.
            Default is 'all'.
        on_errors: string
            How to handle exceptions in feature calculations. Can be 'nan', 'keep', 'raise'.
            When 'nan', return a column with ``np.nan``.
            The length of column corresponding to the number of feature labs.
            When 'keep', return a column with exception objects.
            The default is 'raise' which will raise up the exception.
        target_col
            Only relevant when input is pd.DataFrame, otherwise ignored.
            Specify a single column to be used for transformation.
            If ``None``, all columns of the pd.DataFrame is used.
            Default is None.
        """
        super().__init__(featurizers=featurizers)
        self.n_jobs = n_jobs
        # NOTE(review): both featurizers below are assigned to the same
        # attribute name.  This appears to rely on BaseDescriptor overriding
        # __setattr__ so that each assignment *registers* a featurizer under
        # the 'structure' group; with plain attribute semantics the second
        # assignment would silently discard the RDF featurizer -- confirm.
        self.structure = RadialDistributionFunction(n_bins,
                                                    r_max,
                                                    n_jobs=n_jobs,
                                                    on_errors=on_errors,
                                                    target_col=target_col)
        self.structure = OrbitalFieldMatrix(including_d, n_jobs=n_jobs, on_errors=on_errors, target_col=target_col)
|
{"hexsha": "7db52d2420d9f4b3782f374b8fc5e281f0812c59", "size": 12500, "ext": "py", "lang": "Python", "max_stars_repo_path": "xenonpy/descriptor/structure.py", "max_stars_repo_name": "mori0711/XenonPy", "max_stars_repo_head_hexsha": "e36ca0ea112b45ee629cd980c88e80cd6c96c514", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 93, "max_stars_repo_stars_event_min_datetime": "2018-02-11T23:43:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T02:40:11.000Z", "max_issues_repo_path": "xenonpy/descriptor/structure.py", "max_issues_repo_name": "mori0711/XenonPy", "max_issues_repo_head_hexsha": "e36ca0ea112b45ee629cd980c88e80cd6c96c514", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 192, "max_issues_repo_issues_event_min_datetime": "2018-04-20T04:32:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T05:59:18.000Z", "max_forks_repo_path": "xenonpy/descriptor/structure.py", "max_forks_repo_name": "mori0711/XenonPy", "max_forks_repo_head_hexsha": "e36ca0ea112b45ee629cd980c88e80cd6c96c514", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 51, "max_forks_repo_forks_event_min_datetime": "2018-01-18T08:08:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T05:52:22.000Z", "avg_line_length": 39.0625, "max_line_length": 162, "alphanum_fraction": 0.55304, "include": true, "reason": "import numpy", "num_tokens": 3042}
|
#! /usr/bin/env python
from obspy.core.stream import Stream
from numpy.testing import assert_equal
from geomagio.algorithm import Algorithm
def test_algorithm_process():
    """Algorithm_test.test_algorithm_process()
    confirms that algorithm.process returns an obspy.core.stream object
    """
    result = Algorithm().process(Stream())
    assert_equal(isinstance(result, Stream), True)
def test_algorithm_channels():
    """Algorithm_test.test_algorithm_channels()
    confirms that get_input_channels / get_output_channels echo the
    channel lists passed to the constructor
    """
    in_ch = ["H", "E", "Z", "F"]
    out_ch = ["H", "D", "Z", "F"]
    algo = Algorithm(inchannels=in_ch, outchannels=out_ch)
    assert_equal(algo.get_input_channels(), in_ch)
    assert_equal(algo.get_output_channels(), out_ch)
|
{"hexsha": "986a1512a3b061f551470c051e7ef7cb1126293a", "size": 978, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/algorithm_test/Algorithm_test.py", "max_stars_repo_name": "usgs/geomag-algorithms", "max_stars_repo_head_hexsha": "a83a0e36bed9307828e37b9130c25dbc26dd1bc9", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 49, "max_stars_repo_stars_event_min_datetime": "2015-10-06T17:57:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T18:40:17.000Z", "max_issues_repo_path": "test/algorithm_test/Algorithm_test.py", "max_issues_repo_name": "usgs/geomag-algorithms", "max_issues_repo_head_hexsha": "a83a0e36bed9307828e37b9130c25dbc26dd1bc9", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 229, "max_issues_repo_issues_event_min_datetime": "2015-01-26T20:10:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:46:33.000Z", "max_forks_repo_path": "test/algorithm_test/Algorithm_test.py", "max_forks_repo_name": "alejandrodelcampillo/geomag-algorithms", "max_forks_repo_head_hexsha": "43a734d63a8eb2a696f14237e0054e21d36de7c3", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 44, "max_forks_repo_forks_event_min_datetime": "2015-03-03T16:18:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-06T17:07:38.000Z", "avg_line_length": 33.724137931, "max_line_length": 76, "alphanum_fraction": 0.7484662577, "include": true, "reason": "from numpy", "num_tokens": 198}
|
```python
from sympy import init_session
init_session()
```
IPython console for SymPy 1.5.1 (Python 3.6.10-64-bit) (ground types: gmpy)
These commands were executed:
>>> from __future__ import division
>>> from sympy import *
>>> x, y, z, t = symbols('x y z t')
>>> k, m, n = symbols('k m n', integer=True)
>>> f, g, h = symbols('f g h', cls=Function)
>>> init_printing()
Documentation can be found at https://docs.sympy.org/1.5.1/
```python
#init_printing?
```
### Parameters and two Gaussians
```python
a, b, c, a1, a2 = symbols('a b c a1 a2', positive=True, real=True)
```
```python
g1=exp(-a1*x**2)
g2=exp(-a2*x**2)
g1, g2
```
### Normalization constant
```python
N=integrate(g1*g1, (x, -oo, oo))
N
```
```python
1/sqrt(N)
```
```python
printing.sstrrepr(1/sqrt(N))
```
'2**(1/4)*a1**(1/4)/pi**(1/4)'
### Overlap integral S
```python
S=integrate(g1*g2, (x, -oo, oo))
S
```
```python
S.simplify()
```
```python
printing.sstrrepr(S.simplify())
```
'sqrt(pi)/sqrt(a1 + a2)'
### Kinetic energy $T = -\frac{\hbar^2}{2m} \frac{d^2}{dx^2} = \frac{1}{2m}\left(\frac{\hbar}{i}\frac{d}{dx} \right)^2$
```python
d1=diff(g1,x)
d2=diff(g2,x)
d1, d2
```
```python
T = 1/2 * integrate(d1*d2, (x, -oo, oo))
#T=T.simplify()
#T=T.factor()
T.factor()
```
```python
printing.sstrrepr(T.factor())
```
'1.0*sqrt(pi)*a1*a2/(a1 + a2)**(3/2)'
### Potential $V(x) = (ax^2 - b)e^{-cx^2}$
```python
v=(a*x**2-b)*exp(-c*x**2)
v
```
```python
V = integrate(g1*v*g2, (x, -oo, oo))
V
```
```python
V.factor()
```
```python
printing.sstrrepr(V.factor())
```
'sqrt(pi)*(a - 2*a1*b - 2*a2*b - 2*b*c)/(2*(a1 + a2 + c)**(3/2))'
```python
```
|
{"hexsha": "db4a1c887cb9b0b0ba5017c4f1b98281869d54aa", "size": 42048, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "notebooks/GTO_integrals/.ipynb_checkpoints/GTO_1D_S-checkpoint.ipynb", "max_stars_repo_name": "tsommerfeld/L2-methods_for_resonances", "max_stars_repo_head_hexsha": "acba48bfede415afd99c89ff2859346e1eb4f96c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks/GTO_integrals/.ipynb_checkpoints/GTO_1D_S-checkpoint.ipynb", "max_issues_repo_name": "tsommerfeld/L2-methods_for_resonances", "max_issues_repo_head_hexsha": "acba48bfede415afd99c89ff2859346e1eb4f96c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/GTO_integrals/.ipynb_checkpoints/GTO_1D_S-checkpoint.ipynb", "max_forks_repo_name": "tsommerfeld/L2-methods_for_resonances", "max_forks_repo_head_hexsha": "acba48bfede415afd99c89ff2859346e1eb4f96c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 87.9665271967, "max_line_length": 7784, "alphanum_fraction": 0.8304794521, "converted": true, "num_tokens": 671}
|
import numpy as np
import torch.nn as nn
import vsffdnet.basicblock as B
import torch
"""
# --------------------------------------------
# FFDNet (15 or 12 conv layers)
# --------------------------------------------
Reference:
@article{zhang2018ffdnet,
title={FFDNet: Toward a fast and flexible solution for CNN-based image denoising},
author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
journal={IEEE Transactions on Image Processing},
volume={27},
number={9},
pages={4608--4622},
year={2018},
publisher={IEEE}
}
"""
# --------------------------------------------
# FFDNet
# --------------------------------------------
class FFDNet(nn.Module):
    """FFDNet denoiser: pixel-unshuffle the input, run a CNN conditioned on
    a per-image noise-level map, then pixel-shuffle back to full resolution.
    """
    def __init__(self, in_nc=1, out_nc=1, nc=64, nb=15, act_mode='R'):
        """
        Parameters
        ----------
        in_nc: channel number of input
        out_nc: channel number of output
        nc: channel number
        nb: total number of conv layers
        act_mode: batch norm + activation function; 'BR' means BN+ReLU.
        """
        super(FFDNet, self).__init__()
        assert 'R' in act_mode or 'L' in act_mode, 'Examples of activation function: R, L, BR, BL, IR, IL'
        bias = True
        sf = 2  # spatial down/up-scaling factor of the (un)shuffle stages
        self.m_down = B.PixelUnShuffle(upscale_factor=sf)
        # Head consumes the unshuffled image channels plus one noise-map channel.
        head = B.conv(in_nc * sf * sf + 1, nc, mode='C' + act_mode[-1], bias=bias)
        body = [B.conv(nc, nc, mode='C' + act_mode, bias=bias) for _ in range(nb - 2)]
        tail = B.conv(nc, out_nc * sf * sf, mode='C', bias=bias)
        self.model = B.sequential(head, *body, tail)
        self.m_up = nn.PixelShuffle(upscale_factor=sf)
    def forward(self, x, sigma):
        h, w = x.size()[-2:]
        # Replication-pad so both spatial dims are even before the 2x unshuffle.
        pad_bottom = int(np.ceil(h / 2) * 2 - h)
        pad_right = int(np.ceil(w / 2) * 2 - w)
        x = torch.nn.ReplicationPad2d((0, pad_right, 0, pad_bottom))(x)
        x = self.m_down(x)
        # Broadcast the per-image noise level into a full-resolution map and
        # feed it to the network as an extra channel.
        noise_map = sigma.repeat(1, 1, x.size()[-2], x.size()[-1])
        x = self.model(torch.cat((x, noise_map), 1))
        x = self.m_up(x)
        # Crop the padding back off.
        return x[..., :h, :w]
if __name__ == '__main__':
    # Smoke test: describe the model and push one random batch through it.
    from utils import utils_model
    net = FFDNet(in_nc=1, out_nc=1, nc=64, nb=15, act_mode='R')
    print(utils_model.describe_model(net))
    noisy = torch.randn((2, 1, 240, 240))
    noise_level = torch.randn(2, 1, 1, 1)
    denoised = net(noisy, noise_level)
    print(denoised.shape)

    # run models/network_ffdnet.py
|
{"hexsha": "81121be00ca7deb50c351f0346ff75db787743fa", "size": 2595, "ext": "py", "lang": "Python", "max_stars_repo_path": "vsffdnet/network_ffdnet.py", "max_stars_repo_name": "NSQY/vs-ffdnet", "max_stars_repo_head_hexsha": "e770853e55840f4b4682ea5687d6a5b8d335f0eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-07-22T12:34:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-20T23:03:09.000Z", "max_issues_repo_path": "vsffdnet/network_ffdnet.py", "max_issues_repo_name": "NSQY/vs-ffdnet", "max_issues_repo_head_hexsha": "e770853e55840f4b4682ea5687d6a5b8d335f0eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-22T13:38:54.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-23T10:57:09.000Z", "max_forks_repo_path": "vsffdnet/network_ffdnet.py", "max_forks_repo_name": "NSQY/vs-ffdnet", "max_forks_repo_head_hexsha": "e770853e55840f4b4682ea5687d6a5b8d335f0eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-07-23T21:15:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-10T03:08:42.000Z", "avg_line_length": 30.5294117647, "max_line_length": 108, "alphanum_fraction": 0.5217726397, "include": true, "reason": "import numpy", "num_tokens": 723}
|
!
! Assemble_FTS_TauProfile
!
! Program to assemble the individual TauProfile datafiles into a single
! datafile for an FTS sensor.
!
!
! FILES ACCESSED:
! Input: - Sensor TauProfile netCDF data files for each profile and
! each molecule set.
!
! Output: - TauProfile netCDF data file combining all the profile
! and molecule set data for a single sensor.
!
! SIDE EFFECTS:
! Any output files that exist are overwritten.
!
! RESTRICTIONS:
! *ALL* of the required data must be present for the output files to
! be successfully written.
!
! CREATION HISTORY:
! Written by: Paul van Delst, 02-Oct-2007
! paul.vandelst@noaa.gov
!
PROGRAM Assemble_FTS_TauProfile

  ! -----------------
  ! Environment setup
  ! -----------------
  ! Module usage
  USE Type_Kinds           , ONLY: fp
  USE Message_Handler      , ONLY: SUCCESS, FAILURE, &
                                   Display_Message, Program_Message
  USE TauProfile_Define
  USE TauProfile_netCDF_IO
  USE Tau_Production_Parameters
  USE Tau_Production_Utility
  ! Disable implicit typing
  IMPLICIT NONE

  ! ----------
  ! Parameters
  ! ----------
  CHARACTER(*), PARAMETER :: PROGRAM_NAME = 'Assemble_FTS_TauProfile'
  ! NOTE(review): the continuation line holding the revision string was
  ! missing (dangling '&'), which is a compile error; restored with a
  ! placeholder id -- replace with the real RCS/SVN keyword if available.
  CHARACTER(*), PARAMETER :: PROGRAM_RCS_ID = &
    '$Id$'
  CHARACTER(*), PARAMETER :: PATH = 'TauProfile_data/'
  INTEGER,      PARAMETER :: SET = 1

  ! ---------
  ! Variables
  ! ---------
  CHARACTER(256)  :: Message
  CHARACTER(256)  :: ID_Tag
  CHARACTER(5000) :: History
  CHARACTER(5000) :: Comment
  CHARACTER(256)  :: InFile, OutFile
  CHARACTER(20)   :: Generic_Sensor_ID, Sensor_ID
  CHARACTER(20)   :: Sensor_Name, Satellite_Name
  CHARACTER(20)   :: nTag, bTag
  CHARACTER(20)   :: jTag
  CHARACTER(20)   :: mTag
  CHARACTER(20)   :: iTag
  LOGICAL :: Create_Output
  INTEGER :: WMO_Satellite_ID
  INTEGER :: WMO_Sensor_ID
  INTEGER :: IO_Status
  INTEGER :: Allocate_Status
  INTEGER :: Error_Status
  INTEGER :: n, n1, n2
  INTEGER :: n_m, m, im, m1, m2, iProfile_Set
  INTEGER :: n_j, j, jIdx, Idx(1), Molecule_Set_Numbers(N_MOLECULE_SETS)
  INTEGER :: i, i1, i2
  INTEGER :: iDir
  INTEGER :: n_Channels
  INTEGER, ALLOCATABLE :: Channel_List(:)
  TYPE(TauProfile_type) :: TauProfile

  ! Output header
  ! -------------
  CALL Program_Message( PROGRAM_NAME, &
                        'Program to assemble the individual TauProfile datafiles '//&
                        'for the bands of an FTS sensor into single datafiles.', &
                        '$Revision$' )

  ! Get user input
  ! --------------
  ! The profile set being processed
  WRITE(*, FMT='(/5x,"Select the DEPENDENT PROFILE SET")')
  DO i = 1, N_PROFILE_SETS
    WRITE(*,FMT='(10x,i2,") ",a," profile set")') i, TRIM(PROFILE_SET_ID_TAG(i))
  END DO
  WRITE(*,FMT='(5x,"Enter choice: ")',ADVANCE='NO')
  READ(*,FMT='(i1)',IOSTAT=IO_Status ) iProfile_Set
  IF ( IO_Status /= 0 ) THEN
    CALL Display_Message( PROGRAM_NAME, &
                          'Invalid DEPENDENT PROFILE SET identifier input.', &
                          FAILURE )
    STOP
  END IF
  IF ( iProfile_Set < 1 .OR. iProfile_Set > N_PROFILE_SETS ) THEN
    CALL Display_Message( PROGRAM_NAME, &
                          'Invalid DEPENDENT PROFILE SET identifier value.', &
                          FAILURE )
    STOP
  ENDIF

  ! The molecule set index number(s)
  WRITE(*,FMT='(/5x,"Enter the number of MOLECULE SETS to assemble: ")',ADVANCE='NO' )
  READ(*,FMT='(i2)',IOSTAT=IO_Status ) n_j
  IF ( IO_Status /= 0 ) THEN
    CALL Display_Message( PROGRAM_NAME, &
                          'Invalid input.', &
                          FAILURE )
    STOP
  END IF
  IF ( n_j < 1 .OR. n_j > N_MOLECULE_SETS ) THEN
    CALL Display_Message( PROGRAM_NAME, &
                          'Value is outside valid range.', &
                          FAILURE )
    STOP
  ENDIF
  Molecule_Set_Numbers = -1
  WRITE(*,FMT='(5x,"Enter the MOLECULE SET numbers to process: ")',ADVANCE='NO')
  READ(*,FMT=*,IOSTAT=IO_Status) Molecule_Set_Numbers(1:n_j)
  IF ( IO_Status /= 0 ) THEN
    CALL Display_Message( PROGRAM_NAME, &
                          'Invalid MOLECULE SET input.', &
                          FAILURE )
    STOP
  END IF
  ! Every requested molecule set number must be a recognised tag id.
  DO j = 1, n_j
    IF ( .NOT. ANY(MOLECULE_SET_TAG_ID == Molecule_Set_Numbers(j)) ) THEN
      WRITE( Message,'("Input MOLECULE SET value ",i0," is invalid.")' ) &
                     Molecule_Set_Numbers(j)
      CALL Display_Message( PROGRAM_NAME, &
                            TRIM(Message), &
                            FAILURE )
      STOP
    END IF
  END DO

  ! The calculation direction
  ! -------------------------
  WRITE(*, FMT='(/5x,"Select atmospheric path")')
  DO i = 1, N_DIRECTIONS
    WRITE(*,FMT='(10x,i1,") ",a)') i, TRIM(DIRECTION_NAME(i))
  END DO
  WRITE(*,FMT='(5x,"Enter choice: ")',ADVANCE='NO')
  READ(*,FMT='(i1)',IOSTAT=IO_Status) iDir
  IF ( IO_Status /= 0 ) THEN
    CALL Display_Message( PROGRAM_NAME, &
                          'Invalid ATMOSPHERIC PATH identifier input.', &
                          FAILURE )
    STOP
  END IF
  IF ( iDir /= UPWELLING_DIRECTION   .AND. &
       iDir /= DOWNWELLING_DIRECTION ) THEN
    CALL Display_Message( PROGRAM_NAME, &
                          'Invalid ATMOSPHERIC PATH identifier value.', &
                          FAILURE )
    STOP
  ENDIF

  ! Ask for the instrument bands to process
  ! ---------------------------------------
  WRITE(*, FMT='(/5x,"Enter the begin and end instrument bands [n1,n2]: ")', ADVANCE='NO')
  READ(*,FMT='(i5,i5)',IOSTAT=IO_Status) n1, n2
  IF ( IO_Status /= 0 ) THEN
    CALL Display_Message( PROGRAM_NAME, &
                          'Invalid BAND input.', &
                          FAILURE )
    STOP
  END IF

  ! Define limits
  ! -------------
  ! Define the generic sensor id (hardwired for IASI for now)
  Sensor_Name       = 'iasi'
  Satellite_Name    = 'metop-a'
  Generic_Sensor_Id = TRIM(Sensor_Name)//'_'//TRIM(Satellite_Name)
  ! Define the profile limits
  m1 = 1; m2 = N_PROFILES(iProfile_Set)
  ! Define the angle limits
  i1 = ZENITH_ANGLE_BEGIN; i2 = ZENITH_ANGLE_END

  ! Begin band loop
  ! ---------------
  Band_Loop: DO n = n1, n2
    WRITE( nTag,'("band",i0)' ) n
    WRITE( bTag,'("B",i0)' ) n
    Sensor_Id = TRIM(Sensor_Name)//TRIM(bTag)//'_'//TRIM(Satellite_Name)
    ! Construct the current band output filename
    OutFile = TRIM(DIRECTION_NAME(iDir))//'.'//TRIM(Sensor_Id)//'.TauProfile.nc'
    ! Create an output file for every band
    Create_Output = .TRUE.
    ! Deallocate the channel list array if required
    IF ( ALLOCATED(Channel_List) ) THEN
      DEALLOCATE( Channel_List, STAT=Allocate_Status )
      IF ( Allocate_Status /= 0 ) THEN
        WRITE( Message,'("Error deallocating channel list array for ",a,". STAT=",i0)' ) &
                       TRIM(nTag), Allocate_Status
        CALL Display_Message( PROGRAM_NAME, &
                              TRIM(Message), &
                              FAILURE )
        STOP
      END IF
    END IF

    ! Begin molecule set loop
    ! -----------------------
    Molecule_Loop: DO jIdx = 1, n_j
      ! Define the molecule set number and name
      Idx  = PACK((/(j,j=1,N_MOLECULE_SETS)/), Molecule_Set_Numbers(jIdx)==MOLECULE_SET_TAG_ID)
      j    = Idx(1)
      jTag = MOLECULE_SET_TAG(j)

      ! Begin profile loop
      ! ------------------
      Profile_Loop: DO m = m1, m2
        WRITE( mTag,'("profile",i2.2)' ) m

        ! Begin angle loop
        ! ----------------
        Angle_Loop: DO i = i1, i2
          WRITE( iTag,'("angle",i1)' ) i
          ! Construct the input TauProfile filename
          InFile = TRIM(nTag)//'/'//&
                   TRIM(jTag)//'/'//&
                   TRIM(mTag)//'/'//&
                   TRIM(iTag)//'/'//&
                   TRIM(DIRECTION_NAME(iDir))//'.'//&
                   TRIM(Generic_Sensor_Id)//'.REAL.TauProfile.nc'

          ! Create the output file (once per band, driven by the first input file)
          ! ----------------------
          Create_Output_File: IF ( Create_Output ) THEN
            ! Turn off output creation
            Create_Output = .FALSE.
            ! Inquire the current input file for
            ! its channel dimension and attributes
            Error_Status = Inquire_TauProfile_netCDF( InFile, &
                                                      n_Channels       = n_Channels      , &
                                                      WMO_Satellite_ID = WMO_Satellite_ID, &
                                                      WMO_Sensor_ID    = WMO_Sensor_ID   , &
                                                      ID_Tag           = ID_Tag          , &
                                                      History          = History         , &
                                                      Comment          = Comment )
            IF ( Error_Status /= SUCCESS ) THEN
              CALL Display_Message( PROGRAM_NAME, &
                                    'Error inquiring netCDF TauProfile file '//&
                                    TRIM(InFile), &
                                    Error_Status )
              STOP
            END IF
            ! Inquire the file for its channel list
            ALLOCATE( Channel_List(n_Channels), STAT=Allocate_Status )
            IF ( Allocate_Status /= 0 ) THEN
              WRITE( Message,'("Error allocating channel list array for ",a,". STAT=",i0)' ) &
                             TRIM(InFile), Allocate_Status
              CALL Display_Message( PROGRAM_NAME, &
                                    TRIM(Message), &
                                    FAILURE )
              STOP
            END IF
            Error_Status = Inquire_TauProfile_netCDF( InFile, &
                                                      Channel_List = Channel_List )
            IF ( Error_Status /= SUCCESS ) THEN
              CALL Display_Message( PROGRAM_NAME, &
                                    'Error obtaining channel list from '//TRIM(InFile), &
                                    Error_Status )
              STOP
            END IF
            ! Create the output file (CLOBBER mode)
            Error_Status = Create_TauProfile_netCDF( PATH//TRIM(OutFile), &
                                                     LEVEL_PRESSURE, &
                                                     Channel_List, &
                                                     ZENITH_ANGLE_SECANT(i1:i2), &
                                                     (/(im,im=m1,m2)/), &
                                                     Molecule_Set_Numbers(1:n_j), &
                                                     Release          = TauProfile%Release, &
                                                     Version          = TauProfile%Version, &
                                                     Sensor_ID        = TRIM(Sensor_ID), &
                                                     WMO_Satellite_ID = WMO_Satellite_ID, &
                                                     WMO_Sensor_ID    = WMO_Sensor_ID, &
                                                     ID_Tag           = TRIM(ID_Tag), &
                                                     Title            = TRIM(DIRECTION_NAME(iDir))//' '//&
                                                                        TRIM(nTag)//' transmittances for '//&
                                                                        TRIM(Generic_Sensor_Id), &
                                                     History          = PROGRAM_RCS_ID//'; '//TRIM(History), &
                                                     Comment          = TRIM(Comment) )
            IF ( Error_Status /= SUCCESS ) THEN
              CALL Display_Message( PROGRAM_NAME, &
                                    'Error creating netCDF TauProfile file '//&
                                    TRIM(OutFile), &
                                    Error_Status )
              STOP
            END IF
            WRITE( *,'(10x,a," created.")' ) TRIM(OutFile)
          END IF Create_Output_File

          ! Read the current input file data
          ! --------------------------------
          Error_Status = Read_TauProfile_netCDF( InFile, TauProfile, Quiet=SET )
          IF ( Error_Status /= SUCCESS ) THEN
            CALL Display_Message( PROGRAM_NAME, &
                                  'Error reading netCDF TauProfile file '//&
                                  TRIM(InFile), &
                                  Error_Status )
            STOP
          END IF
          ! Check that it's the correct molecule set, profile and angle
          IF ( TauProfile%Molecule_Set(1) /= j .OR. &
               TauProfile%Profile(1)      /= m .OR. &
               TauProfile%Angle(1)        /= ZENITH_ANGLE_SECANT(i) ) THEN
            Error_Status = FAILURE
            WRITE( Message,'("Molecule_Set, Profile, or Angle value for ",a,&
                           &" (",i2,",",i3,",",f4.2,") are different than expected",&
                           &" (",i2,",",i3,",",f4.2,").")' ) &
                           TRIM(InFile), &
                           TauProfile%Molecule_Set(1), &
                           TauProfile%Profile(1), &
                           TauProfile%Angle(1), &
                           j, m, ZENITH_ANGLE_SECANT(i)
            CALL Display_Message( PROGRAM_NAME, &
                                  TRIM(Message), &
                                  Error_Status )
            STOP
          END IF
          ! Check that the channel lists are the same
          IF ( ANY( (TauProfile%Channel-Channel_List) /= 0 ) ) THEN
            CALL Display_Message( PROGRAM_NAME, &
                                  'Channel list values differ for '//TRIM(InFile), &
                                  FAILURE )
            STOP
          END IF

          ! Write the data into the output file
          ! -----------------------------------
          Error_Status = Write_TauProfile_netCDF( PATH//TRIM(OutFile), &
                                                  TauProfile%Tau(:,:,1,1,1), &
                                                  Angle       =ZENITH_ANGLE_SECANT(i), &
                                                  Profile     =m, &
                                                  Molecule_Set=j, &
                                                  Quiet       =SET )
          IF ( Error_Status /= SUCCESS ) THEN
            CALL Display_Message( PROGRAM_NAME, &
                                  'Error writing data from '//&
                                  TRIM(InFile)//' to '//TRIM(OutFile), &
                                  Error_Status )
            STOP
          END IF

          ! Destroy the TauProfile structure
          ! --------------------------------
          Error_Status = Destroy_TauProfile( TauProfile )
          IF ( Error_Status /= SUCCESS ) THEN
            CALL Display_Message( PROGRAM_NAME, &
                                  'Error destroying TauProfile structure read from '//TRIM(InFile), &
                                  Error_Status )
            STOP
          END IF

        END DO Angle_Loop
      END DO Profile_Loop
    END DO Molecule_Loop

    ! Create a signal file
    ! --------------------
    Error_Status = Create_Signal_File( TRIM(OutFile) )
    IF ( Error_Status /= SUCCESS ) THEN
      CALL Display_Message( PROGRAM_NAME, &
                            'Error creating signal file for '//TRIM(OutFile), &
                            Error_Status )
      STOP
    END IF

  END DO Band_Loop

END PROGRAM Assemble_FTS_TauProfile
|
{"hexsha": "afa2eededa40dd0b087014118a24962e8138cb52", "size": 15732, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/TauProd/Infrared/Assemble_FTS_TauProfile/Assemble_FTS_TauProfile.f90", "max_stars_repo_name": "hsbadr/crtm", "max_stars_repo_head_hexsha": "bfeb9955637f361fc69fa0b7af0e8d92d40718b1", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-11-19T10:00:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T02:42:18.000Z", "max_issues_repo_path": "src/TauProd/Infrared/Assemble_FTS_TauProfile/Assemble_FTS_TauProfile.f90", "max_issues_repo_name": "hsbadr/crtm", "max_issues_repo_head_hexsha": "bfeb9955637f361fc69fa0b7af0e8d92d40718b1", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-11-05T21:04:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-18T18:23:10.000Z", "max_forks_repo_path": "src/TauProd/Infrared/Assemble_FTS_TauProfile/Assemble_FTS_TauProfile.f90", "max_forks_repo_name": "hsbadr/crtm", "max_forks_repo_head_hexsha": "bfeb9955637f361fc69fa0b7af0e8d92d40718b1", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-10-29T17:54:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T08:42:45.000Z", "avg_line_length": 38.0, "max_line_length": 101, "alphanum_fraction": 0.4740020341, "num_tokens": 3511}
|
\section{\module{subprocess} --- Subprocess management}
\declaremodule{standard}{subprocess}
\modulesynopsis{Subprocess management.}
\moduleauthor{Peter \AA strand}{astrand@lysator.liu.se}
\sectionauthor{Peter \AA strand}{astrand@lysator.liu.se}
\versionadded{2.4}
The \module{subprocess} module allows you to spawn new processes,
connect to their input/output/error pipes, and obtain their return
codes. This module intends to replace several other, older modules
and functions, such as:
\begin{verbatim}
os.system
os.spawn*
os.popen*
popen2.*
commands.*
\end{verbatim}
Information about how the \module{subprocess} module can be used to
replace these modules and functions can be found in the following
sections.
\subsection{Using the subprocess Module}
This module defines one class called \class{Popen}:
\begin{classdesc}{Popen}{args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0}
Arguments are:
\var{args} should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On \UNIX{}, with \var{shell=False} (default): In this case, the Popen
class uses \method{os.execvp()} to execute the child program.
\var{args} should normally be a sequence. A string will be treated as a
sequence with the string as the only item (the program to execute).
On \UNIX{}, with \var{shell=True}: If args is a string, it specifies the
command string to execute through the shell. If \var{args} is a
sequence, the first item specifies the command string, and any
additional items will be treated as additional shell arguments.
On Windows: the \class{Popen} class uses CreateProcess() to execute
the child program, which operates on strings. If \var{args} is a
sequence, it will be converted to a string using the
\method{list2cmdline} method. Please note that not all MS Windows
applications interpret the command line the same way:
\method{list2cmdline} is designed for applications using the same
rules as the MS C runtime.
\var{bufsize}, if given, has the same meaning as the corresponding
argument to the built-in open() function: \constant{0} means unbuffered,
\constant{1} means line buffered, any other positive value means use a
buffer of (approximately) that size. A negative \var{bufsize} means to
use the system default, which usually means fully buffered. The default
value for \var{bufsize} is \constant{0} (unbuffered).
The \var{executable} argument specifies the program to execute. It is
very seldom needed: Usually, the program to execute is defined by the
\var{args} argument. If \code{shell=True}, the \var{executable}
argument specifies which shell to use. On \UNIX{}, the default shell
is \file{/bin/sh}. On Windows, the default shell is specified by the
\envvar{COMSPEC} environment variable.
\var{stdin}, \var{stdout} and \var{stderr} specify the executed
program's standard input, standard output and standard error file
handles, respectively. Valid values are \code{PIPE}, an existing file
descriptor (a positive integer), an existing file object, and
\code{None}. \code{PIPE} indicates that a new pipe to the child
should be created. With \code{None}, no redirection will occur; the
child's file handles will be inherited from the parent. Additionally,
\var{stderr} can be \code{STDOUT}, which indicates that the stderr
data from the applications should be captured into the same file
handle as for stdout.
If \var{preexec_fn} is set to a callable object, this object will be
called in the child process just before the child is executed.
(\UNIX{} only)
If \var{close_fds} is true, all file descriptors except \constant{0},
\constant{1} and \constant{2} will be closed before the child process is
executed. (\UNIX{} only)
If \var{shell} is \constant{True}, the specified command will be
executed through the shell.
If \var{cwd} is not \code{None}, the child's current directory will be
changed to \var{cwd} before it is executed. Note that this directory
is not considered when searching the executable, so you can't specify
the program's path relative to \var{cwd}.
If \var{env} is not \code{None}, it defines the environment variables
for the new process.
If \var{universal_newlines} is \constant{True}, the file objects stdout
and stderr are opened as text files, but lines may be terminated by
any of \code{'\e n'}, the \UNIX{} end-of-line convention, \code{'\e r'},
the Macintosh convention or \code{'\e r\e n'}, the Windows convention.
All of these external representations are seen as \code{'\e n'} by the
Python program. \note{This feature is only available if Python is built
with universal newline support (the default). Also, the newlines
attribute of the file objects \member{stdout}, \member{stdin} and
\member{stderr} are not updated by the communicate() method.}
The \var{startupinfo} and \var{creationflags}, if given, will be
passed to the underlying CreateProcess() function. They can specify
things such as appearance of the main window and priority for the new
process. (Windows only)
\end{classdesc}
\subsubsection{Convenience Functions}
This module also defines two shortcut functions:
\begin{funcdesc}{call}{*popenargs, **kwargs}
Run command with arguments. Wait for command to complete, then
return the \member{returncode} attribute.
The arguments are the same as for the Popen constructor. Example:
\begin{verbatim}
retcode = call(["ls", "-l"])
\end{verbatim}
\end{funcdesc}
\begin{funcdesc}{check_call}{*popenargs, **kwargs}
Run command with arguments. Wait for command to complete. If the exit
code was zero then return, otherwise raise \exception{CalledProcessError.}
The \exception{CalledProcessError} object will have the return code in the
\member{returncode} attribute.
The arguments are the same as for the Popen constructor. Example:
\begin{verbatim}
check_call(["ls", "-l"])
\end{verbatim}
\versionadded{2.5}
\end{funcdesc}
\subsubsection{Exceptions}
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
\member{child_traceback}, which is a string containing traceback
information from the child's point of view.
The most common exception raised is \exception{OSError}. This occurs,
for example, when trying to execute a non-existent file. Applications
should prepare for \exception{OSError} exceptions.
A \exception{ValueError} will be raised if \class{Popen} is called
with invalid arguments.
check_call() will raise \exception{CalledProcessError}, if the called
process returns a non-zero return code.
\subsubsection{Security}
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
\subsection{Popen Objects}
Instances of the \class{Popen} class have the following methods:
\begin{methoddesc}{poll}{}
Check if child process has terminated. Returns returncode
attribute.
\end{methoddesc}
\begin{methoddesc}{wait}{}
Wait for child process to terminate. Returns returncode attribute.
\end{methoddesc}
\begin{methoddesc}{communicate}{input=None}
Interact with process: Send data to stdin. Read data from stdout and
stderr, until end-of-file is reached. Wait for process to terminate.
The optional \var{input} argument should be a string to be sent to the
child process, or \code{None}, if no data should be sent to the child.
communicate() returns a tuple (stdout, stderr).
\note{The data read is buffered in memory, so do not use this method
if the data size is large or unlimited.}
\end{methoddesc}
The following attributes are also available:
\begin{memberdesc}{stdin}
If the \var{stdin} argument is \code{PIPE}, this attribute is a file
object that provides input to the child process. Otherwise, it is
\code{None}.
\end{memberdesc}
\begin{memberdesc}{stdout}
If the \var{stdout} argument is \code{PIPE}, this attribute is a file
object that provides output from the child process. Otherwise, it is
\code{None}.
\end{memberdesc}
\begin{memberdesc}{stderr}
If the \var{stderr} argument is \code{PIPE}, this attribute is file
object that provides error output from the child process. Otherwise,
it is \code{None}.
\end{memberdesc}
\begin{memberdesc}{pid}
The process ID of the child process.
\end{memberdesc}
\begin{memberdesc}{returncode}
The child return code. A \code{None} value indicates that the process
hasn't terminated yet. A negative value -N indicates that the child
was terminated by signal N (\UNIX{} only).
\end{memberdesc}
\subsection{Replacing Older Functions with the subprocess Module}
In this section, "a ==> b" means that b can be used as a replacement
for a.
\note{All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an
\exception{OSError} exception.}
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
\subsubsection{Replacing /bin/sh shell backquote}
\begin{verbatim}
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
\end{verbatim}
\subsubsection{Replacing shell pipe line}
\begin{verbatim}
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
\end{verbatim}
\subsubsection{Replacing os.system()}
\begin{verbatim}
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
sts = os.waitpid(p.pid, 0)
\end{verbatim}
Notes:
\begin{itemize}
\item Calling the program through the shell is usually not required.
\item It's easier to look at the \member{returncode} attribute than
the exit status.
\end{itemize}
A more realistic example would look like this:
\begin{verbatim}
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
\end{verbatim}
\subsubsection{Replacing os.spawn*}
P_NOWAIT example:
\begin{verbatim}
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
\end{verbatim}
P_WAIT example:
\begin{verbatim}
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
\end{verbatim}
Vector example:
\begin{verbatim}
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
\end{verbatim}
Environment example:
\begin{verbatim}
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
\end{verbatim}
\subsubsection{Replacing os.popen*}
\begin{verbatim}
pipe = os.popen(cmd, mode='r', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdout=PIPE).stdout
\end{verbatim}
\begin{verbatim}
pipe = os.popen(cmd, mode='w', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdin=PIPE).stdin
\end{verbatim}
\begin{verbatim}
(child_stdin, child_stdout) = os.popen2(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
\end{verbatim}
\begin{verbatim}
(child_stdin,
child_stdout,
child_stderr) = os.popen3(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
(child_stdin,
child_stdout,
child_stderr) = (p.stdin, p.stdout, p.stderr)
\end{verbatim}
\begin{verbatim}
(child_stdin, child_stdout_and_stderr) = os.popen4(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
\end{verbatim}
\subsubsection{Replacing popen2.*}
\note{If the cmd argument to popen2 functions is a string, the command
is executed through /bin/sh. If it is a list, the command is directly
executed.}
\begin{verbatim}
(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
==>
p = Popen(["somestring"], shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
\end{verbatim}
\begin{verbatim}
(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize, mode)
==>
p = Popen(["mycmd", "myarg"], bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
\end{verbatim}
The popen2.Popen3 and popen2.Popen4 classes basically work as subprocess.Popen,
except that:
\begin{itemize}
\item subprocess.Popen raises an exception if the execution fails
\item the \var{capturestderr} argument is replaced with the \var{stderr}
argument.
\item stdin=PIPE and stdout=PIPE must be specified.
\item popen2 closes all file descriptors by default, but you have to
specify close_fds=True with subprocess.Popen.
\end{itemize}
|
{"hexsha": "509f283f6c32cdcbe5fe2ced84882549d79827cd", "size": 13403, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Doc/lib/libsubprocess.tex", "max_stars_repo_name": "deadsnakes/python2.5", "max_stars_repo_head_hexsha": "d5dbcd8556f1e45094bd383b50727e248d9de1bf", "max_stars_repo_licenses": ["PSF-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-10-23T02:57:29.000Z", "max_stars_repo_stars_event_max_datetime": "2015-10-23T02:57:29.000Z", "max_issues_repo_path": "Doc/lib/libsubprocess.tex", "max_issues_repo_name": "deadsnakes/python2.5", "max_issues_repo_head_hexsha": "d5dbcd8556f1e45094bd383b50727e248d9de1bf", "max_issues_repo_licenses": ["PSF-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Doc/lib/libsubprocess.tex", "max_forks_repo_name": "deadsnakes/python2.5", "max_forks_repo_head_hexsha": "d5dbcd8556f1e45094bd383b50727e248d9de1bf", "max_forks_repo_licenses": ["PSF-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-01-30T21:52:13.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-18T21:33:17.000Z", "avg_line_length": 33.0938271605, "max_line_length": 78, "alphanum_fraction": 0.7482653137, "num_tokens": 3487}
|
import numpy as np
from keras.models import Model
from keras.optimizers import Adam
from keras.layers import Input, Conv2D, UpSampling2D, Dense, Flatten, Reshape
from keras.layers.advanced_activations import LeakyReLU
from keras.datasets import mnist
from utils import dataIterator, sample_images
from tqdm import tqdm
class DCGAN:
    """Deep Convolutional GAN (DCGAN).

    Configured for MNIST-shaped inputs (28x28x1); adapt
    ``build_discriminator`` / ``build_generator`` for other datasets.
    """

    def __init__(self, x_input_shape=(28, 28, 1),
                 g_latent_dim=100,
                 discriminator_optimizer=None,
                 generator_optimizer=None):
        """Build and compile the discriminator, generator and combined model.

        Args:
            x_input_shape: Shape of a single real sample (H, W, C).
            g_latent_dim: Dimensionality of the generator's latent input z.
            discriminator_optimizer: Optimizer for the discriminator.
                Defaults to a fresh ``Adam(0.0001)``. ``None`` is used as the
                default value because an ``Adam(...)`` default argument would
                be created once at function-definition time and shared by
                every DCGAN instance (mutable default argument pitfall).
            generator_optimizer: Optimizer for the combined (generator)
                model; same ``None`` convention as above.
        """
        self.x_input_shape = x_input_shape
        self.g_latent_dim = g_latent_dim
        if discriminator_optimizer is None:
            discriminator_optimizer = Adam(0.0001)
        if generator_optimizer is None:
            generator_optimizer = Adam(0.0001)
        self._discriminator = self.build_discriminator()
        self._discriminator.compile(optimizer=discriminator_optimizer,
                                    loss='binary_crossentropy')
        # Freeze the discriminator inside the combined model so gradients
        # flowing through it only update the generator.
        self._discriminator.trainable = False
        for layer in self._discriminator.layers:
            layer.trainable = False
        self._generator = self.build_generator()
        # Combined GAN model: z -> generator -> (frozen) discriminator.
        z = Input((self.g_latent_dim,))
        generator_out = self._generator(z)
        discriminator_out = self._discriminator(generator_out)
        self._combined_model = Model(inputs=z, outputs=discriminator_out)
        self._combined_model.compile(optimizer=generator_optimizer,
                                     loss='binary_crossentropy')

    def build_discriminator(self):
        """Return an uncompiled CNN mapping an image to P(real) in (0, 1)."""
        # Renamed from `input`, which shadowed the builtin.
        image_in = Input(self.x_input_shape)
        x = Conv2D(32, kernel_size=3, strides=2, padding="same")(image_in)
        x = LeakyReLU(alpha=0.2)(x)
        x = Conv2D(64, kernel_size=3, strides=2, padding="same")(x)
        x = LeakyReLU(alpha=0.2)(x)
        x = Conv2D(128, kernel_size=3, strides=2, padding="same")(x)
        x = LeakyReLU(alpha=0.2)(x)
        x = Conv2D(256, kernel_size=3, strides=1, padding="same")(x)
        x = LeakyReLU(alpha=0.2)(x)
        x = Flatten()(x)
        x = Dense(units=1, activation='sigmoid')(x)
        return Model(inputs=image_in, outputs=x)

    def build_generator(self):
        """Return an uncompiled CNN mapping a latent vector to an image in [-1, 1]."""
        # Renamed from `input`, which shadowed the builtin.
        z_in = Input((self.g_latent_dim,))
        # Project z to a (H/4, W/4, 20) feature map, then upsample twice
        # back to full resolution. The spatial dims are derived from
        # x_input_shape instead of the previous hard-coded (7, 7, 20),
        # which only matched 28x28 inputs.
        base_h = int(self.x_input_shape[0] / 4)
        base_w = int(self.x_input_shape[1] / 4)
        x = Dense(units=base_h * base_w * 20)(z_in)
        x = Reshape((base_h, base_w, 20))(x)
        x = UpSampling2D(interpolation='nearest')(x)
        x = Conv2D(32, kernel_size=3, padding="same", activation='relu')(x)
        x = UpSampling2D(interpolation='nearest')(x)
        x = Conv2D(64, kernel_size=3, padding="same", activation='relu')(x)
        # tanh output matches training data scaled to [-1, 1].
        x = Conv2D(self.x_input_shape[2], kernel_size=3, padding="same",
                   activation='tanh')(x)
        return Model(inputs=z_in, outputs=x)

    def train_on_batch(self, x, z):
        """Run one GAN update on a single batch.

        Args:
            x: Batch of real images.
            z: Batch of latent vectors; must match ``x`` in batch size.

        Returns:
            Tuple ``(d_loss, g_loss)``.

        Raises:
            ValueError: If ``x`` and ``z`` have different batch sizes.
        """
        if len(x) != len(z):
            raise ValueError('x and z must have the same batch size')
        # -----------------------
        # Train the discriminator
        # -----------------------
        # Real samples are labelled 1, generated samples 0.
        d_real_loss = self._discriminator.train_on_batch(x, np.ones(len(x)))
        d_generated_loss = self._discriminator.train_on_batch(
            self._generator.predict(z), np.zeros(len(z)))
        d_loss = 0.5 * (d_real_loss + d_generated_loss)
        # -----------------------
        # Train the generator
        # -----------------------
        # Train the generator to make the (frozen) discriminator label its
        # output as real (1).
        g_loss = self._combined_model.train_on_batch(z, np.ones(len(z)))
        return d_loss, g_loss

    def fit(self, x,
            batch_size=32,
            epochs=1,
            verbose=True):
        """Train the GAN on dataset ``x``, saving sample images each epoch.

        Args:
            x: Array of training images, scaled to [-1, 1].
            batch_size: Number of samples per update.
            epochs: Number of passes over the data.
            verbose: Show a tqdm progress bar with current losses.
        """
        data_iterator = dataIterator(x, batch_size)
        batches_per_epoch = len(x) // batch_size
        # Guarantee at least one batch per epoch for tiny datasets.
        if batches_per_epoch < 1:
            batches_per_epoch = 1
        for epoch in range(1, epochs + 1):
            progress = tqdm(range(batches_per_epoch), disable=not verbose)
            for _ in progress:
                x_current_batch = next(data_iterator)
                z = np.random.normal(0, 1, (batch_size, self.g_latent_dim))
                d_loss, g_loss = self.train_on_batch(x_current_batch, z)
                progress.set_description(
                    "EPOCH: {}, D loss: {}, G loss: {}".format(epoch, d_loss, g_loss))
            # Rescale generator output from [-1, 1] to [0, 1] for image saving.
            generated_images = 0.5 * self._generator.predict(
                np.random.normal(0, 1, (25, self.g_latent_dim))) + 0.5
            sample_images(generated_images, epoch, 'images')
if __name__ == '__main__':
    # Labels are unused for GAN training; only the training images matter.
    (train_images, _), (_, _) = mnist.load_data()
    # Scale pixels from [0, 255] to [-1, 1] to match the generator's tanh output.
    train_images = train_images / 127.5 - 1.
    # Add the trailing channel axis expected by the conv layers: (N, 28, 28, 1).
    train_images = np.expand_dims(train_images, axis=3)
    dcgan = DCGAN()
    dcgan.fit(train_images, epochs=30)
|
{"hexsha": "06fe3a6155f3e22374337b417377cf200c869320", "size": 4689, "ext": "py", "lang": "Python", "max_stars_repo_path": "Keras/DCGAN.py", "max_stars_repo_name": "yotamin/GAN", "max_stars_repo_head_hexsha": "b12068c944a6d9e301d99ebbef844ec71e6d9182", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-02-18T22:37:34.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-18T22:37:34.000Z", "max_issues_repo_path": "Keras/DCGAN.py", "max_issues_repo_name": "yotamin/GAN", "max_issues_repo_head_hexsha": "b12068c944a6d9e301d99ebbef844ec71e6d9182", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Keras/DCGAN.py", "max_forks_repo_name": "yotamin/GAN", "max_forks_repo_head_hexsha": "b12068c944a6d9e301d99ebbef844ec71e6d9182", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1219512195, "max_line_length": 115, "alphanum_fraction": 0.6197483472, "include": true, "reason": "import numpy", "num_tokens": 1177}
|
open import Everything
{-
open import Oscar.Prelude
open import Oscar.Class.HasEquivalence
open import Oscar.Class.Symmetrical
open import Oscar.Data.Term
open import Oscar.Data.Substitunction
open import Oscar.Data.Surjcollation
open import Oscar.Data.Surjextenscollation
open import Oscar.Data.Proposequality
import Oscar.Class.HasEquivalence.ExtensionṖroperty
import Oscar.Class.HasEquivalence.Ṗroperty
import Oscar.Class.Symmetrical.ExtensionalUnifies
import Oscar.Class.Symmetrical.Unifies
import Oscar.Property.Setoid.Proposequality -- FIXME see _∼⁰_ below; comment this out to observe confusing error messages
import Oscar.Property.Functor.SubstitunctionExtensionTerm
import Oscar.Class.Surjection.⋆
-}
-- Test module: states that the unification relations built from
-- substitunctions are symmetrical in their two arguments, for both
-- single terms and vectors of terms.
module Test.SymmetricalSubstitunction {𝔭} (𝔓 : Ø 𝔭) (ℓ : Ł) where
-- Terms (and term vectors) over the signature 𝔓, renamed for brevity.
open Term 𝔓 using () renaming (
Term to 𝑩;
Terms to 𝑩')
-- NOTE(review): Substitunction appears to be the arrow of substitutions
-- between term contexts — confirm against Oscar.Data.Substitunction.
open Substitunction 𝔓 using () renaming (
Substitunction to 𝑪)
infix 18 _∼⁰_
-- Propositional-equality unifier relation on terms.
_∼⁰_ = ≡-surjcollation⟦ 𝑪 ⟧ -- ≡-Unifies₀⟦ 𝑪 ⟧ -- FIXME gives a confusing error message
-- _∼⁰_ = ≡-SymUnifies₀⟦ 𝑪 ⟧ -- FIXME gives a more useful error message
-- Extensional (pointwise _≡̇_) variant of the relation.
open Surjextenscollation 𝑪 _≡̇_ renaming (_⟹_ to _∼¹_)
-- Symmetry of _∼⁰_ on single terms, via the Symmetrical instance.
fact1⋆ : ∀ {𝓃} (𝓈 𝓉 : 𝑩 𝓃) → 𝓈 ∼⁰ 𝓉 ≈ 𝓉 ∼⁰ 𝓈
fact1⋆ 𝓈 𝓉 = symmetrical 𝓈 𝓉
-- fact1⋆ 𝓈 𝓉 = symmetrical ⦃ r = Oscar.Class.Symmetrical.Unifies.𝓢ymmetricalUnifies₀ ⦃ ! ⦄ ⦃ ! ⦄ ⦃ Oscar.Property.Setoid.Proposequality.𝓢ymmetryProposequality ⦄ ⦄ 𝓈 𝓉 -- FIXME I wish Agda would tell us that this is how the instances were resolved
-- Symmetry of _∼⁰_ on vectors of terms.
fact1⋆s : ∀ {N 𝓃} (𝓈 𝓉 : 𝑩' N 𝓃) → 𝓈 ∼⁰ 𝓉 ≈ 𝓉 ∼⁰ 𝓈
fact1⋆s 𝓈 𝓉 = symmetrical 𝓈 𝓉
-- Symmetry of the extensional relation _∼¹_ on single terms.
fact1 : ∀ {𝓃} (𝓈 𝓉 : 𝑩 𝓃) → 𝓈 ∼¹ 𝓉 ≈ 𝓉 ∼¹ 𝓈
fact1 𝓈 𝓉 = symmetrical 𝓈 𝓉
-- Symmetry of _∼¹_ on vectors of terms.
fact1s : ∀ {N 𝓃} (𝓈 𝓉 : 𝑩' N 𝓃) → 𝓈 ∼¹ 𝓉 ≈ 𝓉 ∼¹ 𝓈
fact1s 𝓈 𝓉 = symmetrical 𝓈 𝓉
|
{"hexsha": "cb97bdeaf8356b7b980a769a243a1b4d0801f371", "size": 1732, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "archive/agda-3/src/Test/SymmetricalSubstitunction.agda", "max_stars_repo_name": "m0davis/oscar", "max_stars_repo_head_hexsha": "52e1cdbdee54d9a8eaee04ee518a0d7f61d25afb", "max_stars_repo_licenses": ["RSA-MD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "archive/agda-3/src/Test/SymmetricalSubstitunction.agda", "max_issues_repo_name": "m0davis/oscar", "max_issues_repo_head_hexsha": "52e1cdbdee54d9a8eaee04ee518a0d7f61d25afb", "max_issues_repo_licenses": ["RSA-MD"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-04-29T00:35:04.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-11T23:33:04.000Z", "max_forks_repo_path": "archive/agda-3/src/Test/SymmetricalSubstitunction.agda", "max_forks_repo_name": "m0davis/oscar", "max_forks_repo_head_hexsha": "52e1cdbdee54d9a8eaee04ee518a0d7f61d25afb", "max_forks_repo_licenses": ["RSA-MD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8510638298, "max_line_length": 249, "alphanum_fraction": 0.7130484988, "num_tokens": 752}
|
\documentclass[12pt]{article}
\include{preamble}
\title{Math 341 / 650 Spring 2020 \\ Midterm Examination One}
\author{Professor Adam Kapelner}
\date{Thursday, February 27, 2020}
\begin{document}
\maketitle
\noindent Full Name \line(1,0){410}
\thispagestyle{empty}
\section*{Code of Academic Integrity}
\footnotesize
Since the college is an academic community, its fundamental purpose is the pursuit of knowledge. Essential to the success of this educational mission is a commitment to the principles of academic integrity. Every member of the college community is responsible for upholding the highest standards of honesty at all times. Students, as members of the community, are also responsible for adhering to the principles and spirit of the following Code of Academic Integrity.
Activities that have the effect or intention of interfering with education, pursuit of knowledge, or fair evaluation of a student's performance are prohibited. Examples of such activities include but are not limited to the following definitions:
\paragraph{Cheating} Using or attempting to use unauthorized assistance, material, or study aids in examinations or other academic work or preventing, or attempting to prevent, another from using authorized assistance, material, or study aids. Example: using an unauthorized cheat sheet in a quiz or exam, altering a graded exam and resubmitting it for a better grade, etc.
\\
\noindent I acknowledge and agree to uphold this Code of Academic Integrity. \\
\begin{center}
\line(1,0){250} ~~~ \line(1,0){100}\\
~~~~~~~~~~~~~~~~~~~~~signature~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ date
\end{center}
\normalsize
\section*{Instructions}
This exam is seventy five minutes and closed-book. You are allowed \textbf{one} page (front and back) of a \qu{cheat sheet.} You may use a graphing calculator of your choice. Please read the questions carefully. If the question reads \qu{compute,} this means the solution will be a number otherwise you can leave the answer in \textit{any} widely accepted mathematical notation which could be resolved to an exact or approximate number with the use of a computer. I advise you to skip problems marked \qu{[Extra Credit]} until you have finished the other questions on the exam, then loop back. I also advise you to use pencil. The exam is 100 points total plus extra credit. Partial credit will be granted for incomplete answers on most of the questions. \fbox{Box} in your final answers. NO FOOD but drinks okay. Good luck!
\pagebreak
\begin{table}[htp]
\centering
\small
\begin{tabular}{l | llll}
Distribution & Quantile & PMF / PDF &CDF & Sampling \\
of r.v. & Function & function & function & Function \\ \hline
beta & \texttt{qbeta}($p$, $\alpha$, $\beta$)
& \texttt{d-}($x$, $\alpha$, $\beta$)
& \texttt{p-}($x$, $\alpha$, $\beta$)
& \texttt{r-}($\alpha$, $\beta$) \\
betabinomial & \texttt{qbetabinom}($p$, $n$, $\alpha$, $\beta$)
& \texttt{d-}($x$, $n$, $\alpha$, $\beta$)
& \texttt{p-}($x$, $n$, $\alpha$, $\beta$)
& \texttt{r-}($n$, $\alpha$, $\beta$) \\
betanegativebinomial & \texttt{qbeta\_nbinom}($p$, $r$, $\alpha$, $\beta$)
& \texttt{d-}($x$, $r$, $\alpha$, $\beta$)
& \texttt{p-}($x$, $r$, $\alpha$, $\beta$)
& \texttt{r-}($r$, $\alpha$, $\beta$) \\
binomial & \texttt{qbinom}($p$, $n$, $\theta$)
& \texttt{d-}($x$, $n$, $\theta$)
& \texttt{p-}($x$, $n$, $\theta$)
& \texttt{r-}($n$, $\theta$) \\
exponential & \texttt{qexp}($p$, $\theta$)
& \texttt{d-}($x$, $\theta$)
& \texttt{p-}($x$, $\theta$)
& \texttt{r-}($\theta$) \\
gamma & \texttt{qgamma}($p$, $\alpha$, $\beta$)
& \texttt{d-}($x$, $\alpha$, $\beta$)
& \texttt{p-}($x$, $\alpha$, $\beta$)
& \texttt{r-}($\alpha$, $\beta$) \\
geometric & \texttt{qgeom}($p$, $\theta$)
& \texttt{d-}($x$, $\theta$)
& \texttt{p-}($x$, $\theta$)
& \texttt{r-}($\theta$) \\
inversegamma & \texttt{qinvgamma}($p$, $\alpha$, $\beta$)
& \texttt{d-}($x$, $\alpha$, $\beta$)
& \texttt{p-}($x$, $\alpha$, $\beta$)
& \texttt{r-}($\alpha$, $\beta$) \\
negative-binomial & \texttt{qnbinom}($p$, $r$, $\theta$)
& \texttt{d-}($x$, $r$, $\theta$)
& \texttt{p-}($x$, $r$, $\theta$)
& \texttt{r-}($r$, $\theta$) \\
normal (univariate) & \texttt{qnorm}($p$, $\theta$, $\sigma$)
& \texttt{d-}($x$, $\theta$, $\sigma$)
& \texttt{p-}($x$, $\theta$, $\sigma$)
& \texttt{r-}($\theta$, $\sigma$) \\
%normal (multivariate) &
%& \multicolumn{2}{l}{\texttt{dmvnorm}($\x$, $\muvec$, $\bSigma$)}
%& \texttt{r-}($\muvec$, $\bSigma$) \\
poisson & \texttt{qpois}($p$, $\theta$)
& \texttt{d-}($x$, $\theta$)
& \texttt{p-}($x$, $\theta$)
& \texttt{r-}($\theta$) \\
T (standard) & \texttt{qt}($p$, $\nu$)
& \texttt{d-}($x$, $\nu$)
& \texttt{p-}($x$, $\nu$)
& \texttt{r-}($\nu$) \\
%T (nonstandard) & \texttt{qt.scaled}($p$, $\nu$, $\mu$, $\sigma$)
%& \texttt{d-}($x$, $\nu$, $\mu$, $\sigma$)
%& \texttt{p-}($x$, $\nu$, $\mu$, $\sigma$)
%& \texttt{r-}($\nu$, $\mu$, $\sigma$) \\
uniform & \texttt{qunif}($p$, $a$, $b$)
& \texttt{d-}($x$, $a$, $b$)
& \texttt{p-}($x$, $a$, $b$)
& \texttt{r-}($a$, $b$) \\
\end{tabular}
\caption{Functions from $\texttt{R}$ (in alphabetical order) that can be used on this exam with their arguments. The hyphen in columns 3, 4 and 5 is shorthand notation for the full text of the r.v. which can be found in column 2.
}
\label{tab:eqs}
\end{table}
\problem Let $\mathcal{F}$ be binomial with known sample size $n = 3$. The data is all \qu{successes} i.e. $x = 3$. For all questions that have numerical answers, use three significant digits e.g. 0.123 and 1.23$\times 10^{-5}$ or fractions.
\benum
\subquestionwithpoints{2} Find the maximum likelihood estimate for $\theta$. \spc{0}
\subquestionwithpoints{4} What is the main problem with your estimate in (a)? \spc{2}
\subquestionwithpoints{3} Find the $CI_{\theta, 99\%}$. \spc{0.5}
\subquestionwithpoints{4} Does the interval in (c) fulfill the second goal of statistical inference? Yes / no and explain your answer. \spc{6}
\subquestionwithpoints{2} We will now conduct Bayesian inference. Consider the reduced parameter space $\Theta_0 = \braces{0.50, 0.99} \subset \Theta = (0, 1)$. We believe strongly in $\theta = 0.5$ but we want to give some credence to the alternate theory. Thus we establish a prior of
\beqn
\prob{\theta} = \begin{cases}
0.50 \withprob 0.9 \\
0.99 \withprob 0.1 \\
\end{cases}
\eeqn
Is this the \qu{prior of indifference} for the reduced parameter space? Yes / no and explain.\spc{2}
\subquestionwithpoints{5} Find $\thetahatmap$. \spc{3}
\subquestionwithpoints{5} Find $\prob{X = x}$. \spc{4}
\subquestionwithpoints{5} Find the posterior predictive probability $\cprob{X_* = 1}{X = x}$ where $X_*$ denotes the next observation. \spc{5}
\subquestionwithpoints{3} We will now consider the entire parameter space for the binomial model i.e. $\Theta = (0, 1)$. We will use the prior $\theta \sim \betanot{\half}{\half}$. We will see later in class that this is called the \qu{Jeffrey's Prior}. Is this an uninformative prior? Yes / no and explain.\spc{2}
\subquestionwithpoints{2} Is this the \qu{prior of indifference}? Yes / no and explain. \spc{1}
\subquestionwithpoints{2} How many pseudosuccesses and pseudofailures is within this prior?\spc{1}
\subquestionwithpoints{3} What is $\expe{\theta}$?\spc{0}
\subquestionwithpoints{5} Find $\cprob{\theta}{X = x}$. \spc{1}
\subquestionwithpoints{6} Draw $\cprob{\theta}{X = x}$ to the best of your ability. Label all axes and critical points. \spc{6}
\subquestionwithpoints{2} Does $\thetahatmap$ exist? Yes / No. \spc{-0.5}
\subquestionwithpoints{4} Find $\thetahatmmse$ and denote it in the illustration in (n). \spc{1}
\subquestionwithpoints{4} What is the proportion of shrinkage towards the prior expectation if you employ the posterior expectation as your point estimate? \spc{1}
\subquestionwithpoints{5} Find the $CR_{\theta, 99\%}$. \spc{1}
\subquestionwithpoints{5} Find the $HDR_{\theta, 99\%}$ and denote it in the illustration in (n). \spc{1}
\subquestionwithpoints{10} Test if $\theta > 0.5$. Write out the hypotheses and declare the $\alpha$ level you are comfortable with. Estimate the Bayesian $p$-value from the illustration of the posterior distribution in (n) and provide the conclusion of the test. \spc{8}
\subquestionwithpoints{4} Find the posterior predictive probability $\cprob{X_* = 1}{X = x}$ where $X_*$ denotes the next observation. \spc{1}
\subquestionwithpoints{3} What is your best guess of $X_*$? \spc{2}
\eenum
\problem Consider $\Xoneton \iid \betanot{1}{\theta}$.
\benum
\subquestionwithpoints{6} Find $\mathcal{L}\parens{\theta ; \Xoneton}$. Simplify so that your answer does not include the $B(\cdot,\cdot)$ function or the $\Gamma(\cdot)$ function.\spc{4}
\subquestionwithpoints{3} Find $\loglik{\theta ; \Xoneton}$. Simplify as much as possible.\spc{2}
\subquestionwithpoints{3} Find $\thetahatmle$. \spc{3}
\subquestionwithpoints{8} [Extra Credit] Consider $\Xoneton \iid \betanot{\theta_1}{\theta_2}$. Find the MLE for $\theta_1$ and the MLE for $\theta_2$. Partial credit is given.\spc{3}
\eenum
\end{document}
|
{"hexsha": "c3ae6c943c1611f553383f5fd837d0ed1ee8bf27", "size": 9167, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "exams/midterm1/midterm1.tex", "max_stars_repo_name": "kapelner/QC_Math_341_Spring_2020", "max_stars_repo_head_hexsha": "ee25c45f90d707a0e04eae432ea930af93480529", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-02-12T23:53:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-16T01:21:32.000Z", "max_issues_repo_path": "exams/midterm1/midterm1.tex", "max_issues_repo_name": "kapelner/QC_Math_341_Spring_2020", "max_issues_repo_head_hexsha": "ee25c45f90d707a0e04eae432ea930af93480529", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "exams/midterm1/midterm1.tex", "max_forks_repo_name": "kapelner/QC_Math_341_Spring_2020", "max_forks_repo_head_hexsha": "ee25c45f90d707a0e04eae432ea930af93480529", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-02-05T02:27:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-09T19:04:32.000Z", "avg_line_length": 45.157635468, "max_line_length": 824, "alphanum_fraction": 0.6690302171, "num_tokens": 3034}
|
import pyhanabi
from rl_env import Agent
from random import randint
import numpy as np
import copy
class ProbAgent(Agent):
"""Agent that applies a simple heuristic."""
def __init__(self, config, *args, **kwargs):
    """Store the agent configuration.

    Reads 'information_tokens' from the config, falling back to the
    standard budget of 8 when the key is absent.
    """
    self.config = config
    # Default to 8 information tokens when not configured.
    self.max_information_tokens = config.get('information_tokens', 8)
@staticmethod
def playable_card(card, fireworks):
"""A card is playable if it can be placed on the fireworks pile."""
return card['rank'] == fireworks[card['color']]
def get_new_player_observed_hands(self, observation, c_obs, replaced_player_id, replacement_hand, their_id):
    """Rebuild each player's observed hand, substituting a sampled hand.

    The seat `replaced_player_id` is shown the cards in `replacement_hand`
    (encoded as color_index * 5 + rank), the seat `their_id` sees only
    hidden placeholders, and every other seat's hand is read back from the
    C observation object.
    """
    all_hands = []
    # One reusable C card struct for reading cards out of the observation.
    card_buf = pyhanabi.ffi.new("pyhanabi_card_t*")
    for player_id in range(c_obs.num_players()):
        current_hand = []
        hand_size = pyhanabi.lib.ObsGetHandSize(c_obs._observation, player_id)
        if player_id == replaced_player_id:
            # Decode the sampled replacement hand into color/rank dicts.
            for encoded in replacement_hand:
                current_hand.append({'color': pyhanabi.color_idx_to_char(int(encoded / 5)),
                                     'rank': encoded % 5})
        elif player_id == their_id:
            # This seat cannot see its own cards: emit hidden placeholders,
            # one per card in the replacement hand.
            for _ in replacement_hand:
                current_hand.append({'color': None, 'rank': -1})
        else:
            # Read the real hand from the underlying C observation.
            for slot in range(hand_size):
                pyhanabi.lib.ObsGetHandCard(c_obs._observation, player_id, slot, card_buf)
                current_hand.append(pyhanabi.HanabiCard(card_buf.color, card_buf.rank).to_dict())
        all_hands.append(current_hand)
    return all_hands
def level_one_belief(self, num_samples, probs, self_card_knowledge, self_id, other_card_knowledges, other_id, my_obs, their_obs, other_c_obs):
discards = my_obs['discard_pile']
# their_observed_hands = copy.deepcopy(their_obs['observed_hands'])
fireworks = my_obs['fireworks']
prob_vecs = np.zeros(shape=(len(self_card_knowledge), 25), dtype=float)
attempts = 0
while attempts < 100:
ind = 0
while ind < num_samples:
sample_hand = []
for prob in probs:
card_prob = np.array(prob, dtype=float) / np.sum(prob, dtype=float)
sample_hand.append(np.random.choice(25, 1, p=card_prob)[0])
hand_prob = self.hand_prob(probs, self_card_knowledge, sample_hand)
# for card_ind, card in enumerate(sample_hand):
# their_observed_hands[self_id][card_ind]['rank'] = card % 5
# their_observed_hands[self_id][card_ind]['color'] = pyhanabi.color_idx_to_char(card // 5)
their_observed_hands = self.get_new_player_observed_hands(their_obs, other_c_obs, self_id, sample_hand, other_id)
new_probs = np.array(self.card_knowledge_to_prob_vectors(other_card_knowledges, discards, their_observed_hands, fireworks, as_counts=False))
new_probs *= hand_prob
prob_vecs += new_probs
ind += 1
attempts += 1
if np.sum(prob_vecs) > 0.:
break
ret_list = []
for prob in prob_vecs:
prob /= np.sum(prob)
ret_list.append(list(prob))
return ret_list
@staticmethod
def hand_prob(prob_counts, self_knowledge, hand):
knowledge_set = set(self_knowledge)
match_inds = [[] for _ in knowledge_set]
for ind, knowledge in enumerate(knowledge_set):
for query_ind, query_knowledge in enumerate(self_knowledge):
if query_knowledge == knowledge:
match_inds[ind].append(query_ind)
prob = 1.
for ind_list in match_inds:
for ind in ind_list:
if np.sum(prob_counts[ind_list[0]], dtype=float) == 0.:
return 0.
card_prob = np.array(prob_counts[ind_list[0]], dtype=float) / np.sum(prob_counts[ind_list[0]], dtype=float)
card_ind = np.random.choice(25, 1, p=card_prob)[0]
prob_counts[ind_list[0]][card_ind] = prob_counts[ind_list[0]][card_ind] - 1
if prob_counts[ind_list[0]][card_ind] < 0:
return 0.
prob *= card_prob[card_ind]
return prob
@staticmethod
def card_knowledge_to_prob_vectors(card_knowledges, discards, observed_hands, fireworks, as_counts=False):
# discards = observation['discard_pile']
# observed_hands = observation['observed_hands']
# fireworks = observation['fireworks']
infosets = []
for card_knowledge in card_knowledges:
colors = []
ranks = []
valid_info = card_knowledge.split('|')[1]
for card_info in valid_info:
if card_info in pyhanabi.COLOR_CHAR:
colors.append(pyhanabi.color_char_to_idx(card_info))
else:
ranks.append(int(card_info) - 1)
# Store indices for length 50 vectors that will hold card counts/probs that should
# be updated using card counts
infoset = []
for color in colors:
for rank in ranks:
infoset.append((5 * color) + rank)
infosets.append(infoset)
card_counts = [0]*25
for ind in range(len(card_counts)):
if ind % 5 == 0:
card_counts[ind] = 3
elif ind % 5 < 4:
card_counts[ind] = 2
else:
card_counts[ind] = 1
for card in discards:
card_counts[(5 * pyhanabi.color_char_to_idx(card['color'])) + card['rank']] -= 1
for color, rank in fireworks.items():
if rank > 0:
for ind in range(rank):
card_counts[(5 * pyhanabi.color_char_to_idx(color)) + ind] -= 1
for hand in observed_hands:
for card_info in hand:
if card_info['rank'] < 0:
break
card_counts[(5 * pyhanabi.color_char_to_idx(card_info['color'])) + card_info['rank']] -= 1
prob_vecs = [[0.]*25 for _ in range(len(infosets))]
for set_ind, infoset in enumerate(infosets):
set_sum = 0.
for ind in infoset:
set_sum += float(card_counts[ind])
if card_counts[ind] > 0:
prob_vecs[set_ind][ind] = float(card_counts[ind])
if not as_counts and set_sum > 0:
for ind in range(25):
prob_vecs[set_ind][ind] /= set_sum
return prob_vecs
def act(self, observation):
"""Act based on an observation."""
if observation['current_player_offset'] != 0:
return None
# Check if there are any pending hints and play the card corresponding to
# the hint.
for card_index, hint in enumerate(observation['card_knowledge'][0]):
if hint['color'] is not None or hint['rank'] is not None:
return {'action_type': 'PLAY', 'card_index': card_index}
# Check if it's possible to hint a card to your colleagues.
fireworks = observation['fireworks']
if observation['information_tokens'] > 0:
# Check if there are any playable cards in the hands of the opponents.
for player_offset in range(1, observation['num_players']):
player_hand = observation['observed_hands'][player_offset]
player_hints = observation['card_knowledge'][player_offset]
# Check if the card in the hand of the opponent is playable.
for card, hint in zip(player_hand, player_hints):
# (ASF) Added for testing
if ProbAgent.playable_card(card, fireworks):
if hint['color'] is None and hint['rank'] is None:
if randint(0, 1) > 0:
return {
'action_type': 'REVEAL_COLOR',
'color': card['color'],
'target_offset': player_offset
}
else:
return {
'action_type': 'REVEAL_RANK',
'rank': card['rank'],
'target_offset': player_offset
}
# If no card is hintable then discard or play.
if observation['information_tokens'] < self.max_information_tokens:
return {'action_type': 'DISCARD', 'card_index': 0}
else:
return {'action_type': 'PLAY', 'card_index': 0}
|
{"hexsha": "dc6eb196f6d780f55f8a776aad976632d457cef8", "size": 7740, "ext": "py", "lang": "Python", "max_stars_repo_path": "prob_based_agent.py", "max_stars_repo_name": "mwalton/hanabi-aaai20", "max_stars_repo_head_hexsha": "fa39a82c4845c233ed0b5e41370a0e1eaff3b0f9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-06T11:38:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-06T11:38:49.000Z", "max_issues_repo_path": "prob_based_agent.py", "max_issues_repo_name": "mwalton/hanabi-aaai20", "max_issues_repo_head_hexsha": "fa39a82c4845c233ed0b5e41370a0e1eaff3b0f9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "prob_based_agent.py", "max_forks_repo_name": "mwalton/hanabi-aaai20", "max_forks_repo_head_hexsha": "fa39a82c4845c233ed0b5e41370a0e1eaff3b0f9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-17T19:34:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-17T19:34:42.000Z", "avg_line_length": 37.572815534, "max_line_length": 148, "alphanum_fraction": 0.6387596899, "include": true, "reason": "import numpy", "num_tokens": 1929}
|
"""
CrystalNets
Module for automatic reckognition of crystal net topologies.
To use as an executable, run the source file in a shell:
```bash
julia --project=$(normpath(@__DIR__, "..")) $(@__FILE__)
```
Otherwise, as a module, to try to reckognize the net underlying a crystal given in a
chemical file format called FILE, the entry point is the following execution:
```julia
julia> using CrystalNets
julia> reckognize_topology(topological_genome(CrystalNet(parse_chemfile(FILE))))
```
"""
module CrystalNets
export CrystalNet, topological_genome, parse_chemfile, reckognize_topology
import LinearAlgebra: det, norm, rank
using Base.Threads
using PeriodicGraphs
import PeriodicGraphs: hash_position
using StaticArrays
using LightGraphs
import Logging
import Logging: Warn, Info, @logmsg
const DOWARN = Base.RefValue{Bool}(false)
const DOEXPORT = Base.RefValue{Bool}(false)
function toggle_warning(to=nothing)
global DOWARN
DOWARN[] = to isa Nothing ? !DOWARN[] : to
end
function toggle_export(to=nothing)
global DOEXPORT
DOEXPORT[] = to isa Nothing ? !DOEXPORT[] : to
end
function __init__()
toggle_warning("--no-warn" ∉ ARGS)
toggle_export("--no-export" ∉ ARGS)
nothing
end
include("utils.jl")
__precompile__(true)
include("types.jl") # Main internal type definitions used to represent topologies
include("input.jl") # Crystal file parsing and conversion to an internal type
include("archive.jl") # Manipulation of the topological archive
include("output.jl")
include("arithmetics.jl")
include("symmetries.jl")
include("topology.jl") # Entry point for the main algorithm
include("executable.jl") # Entry point for the argument parsing of the executable
include("precompile.jl")
end # module CrystalNets
if abspath(PROGRAM_FILE) == @__FILE__
CrystalNets.julia_main()
end
|
{"hexsha": "98074a02fcfea8280a593f3bbd753f5b955a7646", "size": 1818, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/CrystalNets.jl", "max_stars_repo_name": "kjappelbaum/CrystalNets.jl", "max_stars_repo_head_hexsha": "a3ea0c02ad2125503b155dce1ec1499d842070ac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2020-10-13T16:03:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T22:10:36.000Z", "max_issues_repo_path": "src/CrystalNets.jl", "max_issues_repo_name": "kjappelbaum/CrystalNets.jl", "max_issues_repo_head_hexsha": "a3ea0c02ad2125503b155dce1ec1499d842070ac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-10-13T21:55:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-10T07:53:03.000Z", "max_forks_repo_path": "src/CrystalNets.jl", "max_forks_repo_name": "kjappelbaum/CrystalNets.jl", "max_forks_repo_head_hexsha": "a3ea0c02ad2125503b155dce1ec1499d842070ac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-10-13T19:04:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-11T07:08:51.000Z", "avg_line_length": 27.1343283582, "max_line_length": 84, "alphanum_fraction": 0.7612761276, "num_tokens": 473}
|
# Face filters (Snapchat like) using OpenCV
# @author:- Webwares @2020
import cv2
import sys
import logging as log
import datetime as dt
from time import sleep
import numpy as np
import os
import subprocess

# Haar cascade model used for frontal-face detection.
cascPath = "haarcascade_frontalface_default.xml" # for face detection
# Fetch cascade/filter assets on first run.
if not os.path.exists(cascPath):
    subprocess.call(['./download_filters.sh'])
else:
    print('Filters already exist!')
faceCascade = cv2.CascadeClassifier(cascPath)
log.basicConfig(filename='webcam.log',level=log.INFO)
video_capture = cv2.VideoCapture(0)  # default webcam
anterior = 0  # face count from the previous frame, used for change logging
# Overlay images (BGR); the put_* helpers treat near-white pixels as transparent.
mst = cv2.imread('moustache.png')
hat = cv2.imread('cowboy_hat.png')
dog = cv2.imread('dog_filter.png')
def put_moustache(mst,fc,x,y,w,h):
    """Overlay the moustache image onto frame `fc` inside the face box.

    Args:
        mst: moustache image (BGR); near-white pixels (>= 235) are treated
            as transparent background.
        fc: full camera frame to draw on (modified in place).
        x, y, w, h: face bounding box as returned by the cascade detector.

    Returns:
        The frame `fc` with the moustache blended in.
    """
    face_width = w
    face_height = h
    # Moustache spans ~42% of the face width and ~14% of its height.
    mst_width = int(face_width*0.4166666)+1
    mst_height = int(face_height*0.142857)+1
    mst = cv2.resize(mst,(mst_width,mst_height))
    # Hoist the loop-invariant placement offsets out of the pixel loops
    # (the original recomputed these int() expressions on every iteration).
    y_off = int(0.62857142857*face_height)  # vertical start, below the nose
    x_off = int(0.29166666666*face_width)   # horizontal start, roughly centered
    for i in range(y_off, y_off+mst_height):
        for j in range(x_off, x_off+mst_width):
            for k in range(3):
                pixel = mst[i-y_off][j-x_off][k]
                # Copy only non-background (non near-white) channels.
                if pixel < 235:
                    fc[y+i][x+j][k] = pixel
    return fc
def put_hat(hat,fc,x,y,w,h):
    """Overlay the hat image onto frame `fc` above the face box.

    Args:
        hat: hat image (BGR); near-white pixels (>= 235) are treated as
            transparent background.
        fc: full camera frame to draw on (modified in place).
        x, y, w, h: face bounding box as returned by the cascade detector.

    Returns:
        The frame `fc` with the hat blended in.
    """
    face_width = w
    face_height = h
    # Hat is as wide as the face and ~35% as tall.
    hat_width = face_width+1
    hat_height = int(0.35*face_height)+1
    hat = cv2.resize(hat,(hat_width,hat_height))
    # Hoist the loop-invariant vertical offset out of the pixel loops
    # (the original recomputed int(0.25*face_height) on every iteration).
    y_off = int(0.25*face_height)  # hat sits above the top of the face box
    for i in range(hat_height):
        for j in range(hat_width):
            for k in range(3):
                pixel = hat[i][j][k]
                # Copy only non-background (non near-white) channels.
                if pixel < 235:
                    fc[y+i-y_off][x+j][k] = pixel
    return fc
def put_dog_filter(dog,fc,x,y,w,h):
    """Overlay the dog-face image onto frame `fc`, covering the face box.

    Args:
        dog: dog-filter image (BGR); near-white pixels (>= 235) are treated
            as transparent background.
        fc: full camera frame to draw on (modified in place).
        x, y, w, h: face bounding box as returned by the cascade detector.

    Returns:
        The frame `fc` with the dog filter blended in.
    """
    # The dog overlay is larger than the face box (1.5x wide, 1.75x tall).
    dog_width = int(w*1.5)
    dog_height = int(h*1.75)
    dog = cv2.resize(dog,(dog_width,dog_height))
    # Hoist the loop-invariant placement offsets out of the pixel loops
    # (the original recomputed these int() expressions on every iteration).
    y_off = int(0.375*h)+1  # shift up so the ears sit above the face
    x_off = int(0.25*w)     # shift left to center the wider overlay
    for i in range(dog_height):
        for j in range(dog_width):
            for k in range(3):
                pixel = dog[i][j][k]
                # Copy only non-background (non near-white) channels.
                if pixel < 235:
                    fc[y+i-y_off][x+j-x_off][k] = pixel
    return fc
# NOTE: this script uses Python 2 syntax (print statement, raw_input).
ch = 0
print "Select Filter:1.) Hat 2.) Moustache 3.) Hat and Moustache 4.) Dog Filter"
ch = int(raw_input())
while True:
    if not video_capture.isOpened():
        print('Unable to load camera.')
        sleep(5)
        pass

    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(40,40)
    )

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        #cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        #cv2.putText(frame,"Person Detected",(x,y),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),2)
        # Apply the user-selected overlay(s) to each detected face.
        if ch==2:
            frame = put_moustache(mst,frame,x,y,w,h)
        elif ch==1:
            frame = put_hat(hat,frame,x,y,w,h)
        elif ch==3:
            frame = put_moustache(mst,frame,x,y,w,h)
            frame = put_hat(hat,frame,x,y,w,h)
        else:
            frame = put_dog_filter(dog,frame,x,y,w,h)

    # Log only when the number of detected faces changes.
    if anterior != len(faces):
        anterior = len(faces)
        log.info("faces: "+str(len(faces))+" at "+str(dt.datetime.now()))

    # Display the resulting frame
    cv2.imshow('Video', frame)

    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
|
{"hexsha": "71ab8db66512393d36b912810db06acff09faf7e", "size": 3637, "ext": "py", "lang": "Python", "max_stars_repo_path": "snapchat.py", "max_stars_repo_name": "webwares/snapchat", "max_stars_repo_head_hexsha": "1b90d2de66a7acd36052b7ab7cb3fe0528ead506", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "snapchat.py", "max_issues_repo_name": "webwares/snapchat", "max_issues_repo_head_hexsha": "1b90d2de66a7acd36052b7ab7cb3fe0528ead506", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "snapchat.py", "max_forks_repo_name": "webwares/snapchat", "max_forks_repo_head_hexsha": "1b90d2de66a7acd36052b7ab7cb3fe0528ead506", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1654676259, "max_line_length": 111, "alphanum_fraction": 0.5999450096, "include": true, "reason": "import numpy", "num_tokens": 1078}
|
[STATEMENT]
lemma (in cf_scospan) the_cf_scospan_ArrMap_app_\<bb>[cat_ss_cs_simps]:
assumes "f = \<bb>\<^sub>S\<^sub>S"
shows "\<langle>\<aa>\<rightarrow>\<gg>\<rightarrow>\<oo>\<leftarrow>\<ff>\<leftarrow>\<bb>\<rangle>\<^sub>C\<^sub>F\<^bsub>\<CC>\<^esub>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> = \<CC>\<lparr>CId\<rparr>\<lparr>\<bb>\<rparr>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<langle>\<aa>\<rightarrow>\<gg>\<rightarrow>\<oo>\<leftarrow>\<ff>\<leftarrow>\<bb>\<rangle>\<^sub>C\<^sub>F\<^bsub>\<CC>\<^esub>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> = \<CC>\<lparr>CId\<rparr>\<lparr>\<bb>\<rparr>
[PROOF STEP]
using cat_ss_ineq
[PROOF STATE]
proof (prove)
using this:
\<aa>\<^sub>S\<^sub>S \<noteq> \<bb>\<^sub>S\<^sub>S
\<aa>\<^sub>S\<^sub>S \<noteq> \<oo>\<^sub>S\<^sub>S
\<bb>\<^sub>S\<^sub>S \<noteq> \<oo>\<^sub>S\<^sub>S
\<gg>\<^sub>S\<^sub>S \<noteq> \<ff>\<^sub>S\<^sub>S
\<gg>\<^sub>S\<^sub>S \<noteq> \<aa>\<^sub>S\<^sub>S
\<gg>\<^sub>S\<^sub>S \<noteq> \<bb>\<^sub>S\<^sub>S
\<gg>\<^sub>S\<^sub>S \<noteq> \<oo>\<^sub>S\<^sub>S
\<ff>\<^sub>S\<^sub>S \<noteq> \<aa>\<^sub>S\<^sub>S
\<ff>\<^sub>S\<^sub>S \<noteq> \<bb>\<^sub>S\<^sub>S
\<ff>\<^sub>S\<^sub>S \<noteq> \<oo>\<^sub>S\<^sub>S
goal (1 subgoal):
1. \<langle>\<aa>\<rightarrow>\<gg>\<rightarrow>\<oo>\<leftarrow>\<ff>\<leftarrow>\<bb>\<rangle>\<^sub>C\<^sub>F\<^bsub>\<CC>\<^esub>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> = \<CC>\<lparr>CId\<rparr>\<lparr>\<bb>\<rparr>
[PROOF STEP]
by
(
cs_concl
cs_simp: V_cs_simps the_cf_scospan_ArrMap_app assms
cs_intro: cat_ss_cs_intros
)
|
{"llama_tokens": 705, "file": "CZH_Elementary_Categories_czh_ecategories_CZH_ECAT_SS", "length": 2}
|
#ASSUMES DATA WITH THROTTLING, NO DECOR STALL
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
class Cycle_Dump:
    """Incremental parser for a gem5 stats.txt dump.

    Each call to parseCycle() consumes one stat-dump period and records the
    power predictor's supply current and voltage samples.
    """
    stats = None            # file handle for the stats.txt being parsed
    supply_current = None   # latest system.cpu.powerPred.supply_current value
    supply_voltage = None   # latest system.cpu.powerPred.supply_voltage value

    def __init__(self, stats):
        """Wrap an open stats file and skip its leading header line."""
        self.stats = stats
        self.cycle = 0  # number of dump periods parsed so far
        self.stats.readline()

    def getValue(self, line):
        """Return the numeric value (second whitespace field) of a stat line."""
        return float(line.split()[1])

    def parseCycle(self):
        """Consume one dump period from the stats file.

        Returns:
            True when the end of the file is reached, False after a full
            period has been parsed.
        """
        while True:
            line = self.stats.readline()
            if not line:
                # End of file: no more dump periods.
                return True
            elif not line.upper().isupper():
                # A line with no alphabetic character separates dump periods;
                # skip the 4 header lines of the next period and stop.
                for _ in range(4):
                    self.stats.readline()
                self.cycle += 1
                return False
            else:
                stat_name = line.split()[0].split(':')[0]
                if stat_name == "system.cpu.powerPred.supply_current":
                    self.supply_current = self.getValue(line)
                elif stat_name == "system.cpu.powerPred.supply_voltage":
                    self.supply_voltage = self.getValue(line)

    def dump(self):
        """Print the most recently parsed period's values.

        Bug fix: the original referenced nonexistent attributes
        (self.supply_curr, self.supply_volt, and an unset self.cycle),
        which raised AttributeError whenever dump() was called.
        """
        print('******* CYCLE: ', self.cycle, '*********')
        print('SUPPLY CURRENT: ', self.supply_current)
        print('SUPPLY VOLTAGE: ', self.supply_voltage)
#PARAMETERS
HOME = os.environ['HOME']
OUTPUT_DIR = 'output_1_6'
TEST = 'crc'
NUM_INSTR = '1001'
VERSION = '1'
PDN = 'DESKTOP_INTEL_DT'
PREDICTOR = 'HarvardPowerPredictor_1'
# gem5 stats file produced by the run identified by the parameters above.
path = HOME+'/'+OUTPUT_DIR+'/gem5_out/'+TEST+'_'+NUM_INSTR+'_'+VERSION+'_'+PDN+'_'+PREDICTOR+'/stats.txt'
print(path)
stats = open(path, 'r')
# Cycle window displayed on the x axis.
start_cycle = 0
end_cycle = 1000
#END PARAMETERS

fig = plt.figure(figsize=(60,5))
ax = plt.axes()
fig.suptitle('Supply Voltage Over Time' + '(' + PREDICTOR + ', ' + PDN + ', ' + TEST + ' )', fontsize=14)
ax.set_xlabel('Cycle', fontsize=18)
ax.set_ylabel('Supply Voltage', fontsize=18)
# Second y axis for supply current, sharing the cycle x axis.
ax2 = ax.twinx()
ax2.set_ylabel('Current', color='tab:blue', fontsize=18) # we already handled the x-label with ax1

voltage = [0]
current =[0]
cycle_dump = Cycle_Dump(stats)
# Collect one voltage/current sample per stat-dump period until EOF.
while True:
    EOF = cycle_dump.parseCycle()
    if EOF:
        break
    voltage.append(cycle_dump.supply_voltage)
    current.append(cycle_dump.supply_current)

xvar = np.linspace(0,len(voltage),len(voltage))
ax.plot(xvar, voltage,color='black', linewidth=1.0)
# Floor the y range at values > 0.8 to hide the dummy leading 0 sample.
ax.set_ylim(bottom = min(i for i in voltage if i > 0.8), top = max(voltage))
ax2.plot(xvar, current, color='tab:blue')
ax2.tick_params(axis='y', labelcolor='tab:blue')
# NOTE(review): the 0.8 floor mirrors the voltage axis above; for current this
# looks copy-pasted — confirm the intended lower bound for the current axis.
ax2.set_ylim([min(i for i in current if i > 0.8), max(current)])
plt.xlim(left = start_cycle, right = min(end_cycle,len(xvar)) )
plt.savefig(HOME+'/plot/1-6_1x_Vs&Is_vs_time_' + PDN + '_' + TEST +'.png', dpi=300)
|
{"hexsha": "d9de92d3f470460cdc3e3e6132d50f9b3ffe9e77", "size": 2979, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/jimmy_plot/deprecated/supply_curr+volt_over_time.py", "max_stars_repo_name": "JimmyZhang12/predict-T", "max_stars_repo_head_hexsha": "8ae818b0791104de20633ce91e6d633cda7445b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/jimmy_plot/deprecated/supply_curr+volt_over_time.py", "max_issues_repo_name": "JimmyZhang12/predict-T", "max_issues_repo_head_hexsha": "8ae818b0791104de20633ce91e6d633cda7445b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/jimmy_plot/deprecated/supply_curr+volt_over_time.py", "max_forks_repo_name": "JimmyZhang12/predict-T", "max_forks_repo_head_hexsha": "8ae818b0791104de20633ce91e6d633cda7445b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.79, "max_line_length": 105, "alphanum_fraction": 0.6193353474, "include": true, "reason": "import numpy", "num_tokens": 791}
|
# -*- coding: utf-8 -*-
# Test script: renders solid-color grid frames via clipsToFrame to sanity-check
# grid layout, clip sizing, and sub-pixel positioning.

import argparse
import inspect
import math
import numpy as np
import os
from pprint import pprint
import sys

# add parent directory to sys path to import relative modules
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)

from lib.composition_utils import *
from lib.io_utils import *
from lib.math_utils import *
from lib.video_utils import *

# input
parser = argparse.ArgumentParser()
parser.add_argument('-width', dest="WIDTH", default=1920, type=int, help="Output video width")
parser.add_argument('-height', dest="HEIGHT", default=1080, type=int, help="Output video height")
parser.add_argument('-margin', dest="CLIP_MARGIN", default=1.0, type=float, help="Output video height")
parser.add_argument('-grid', dest="GRID", default="256x256", help="Size of grid")
a = parser.parse_args()

# parse arguments
GRID_W, GRID_H = tuple([int(v) for v in a.GRID.strip().split("x")])

# One placeholder sample per grid cell.
samples = [{"index": i, "filename": "", "start": 0, "dur": 0} for i in range(GRID_W*GRID_H)]
# Lay the samples out on the grid; the y margin is scaled by the aspect ratio
# so margins appear uniform on a non-square output.
samples = addGridPositions(samples, GRID_W, a.WIDTH, a.HEIGHT, marginX=a.CLIP_MARGIN, marginY=a.CLIP_MARGIN*(1.0*a.HEIGHT/a.WIDTH))
# pprint(samples[0])

# Give every cell a single random solid-color pixel as its frame data.
for i, s in enumerate(samples):
    pixels = np.array([[getRandomColor(i)]])
    samples[i]["framePixelData"] = [pixels]

# Render the full grid at the requested output resolution.
clipsToFrame({
    "filename": "output/test_grid.png",
    "width": a.WIDTH,
    "height": a.HEIGHT,
    "clips": samples,
    "overwrite": True,
    "debug": True
})

# Single red clip, fractional size/position (0.5 offset, 99x99).
clipsToFrame({
    "filename": "output/test_grid2.png",
    "width": 100,
    "height": 100,
    "clips": [{
        "width": 99.0,
        "height": 99.0,
        "x": 0.5,
        "y": 0.5,
        "index": 0,
        "framePixelData": [np.array([[[255,0,0]]])]
    }],
    "overwrite": True,
    "debug": True
})

# Fractional clip size at the frame origin.
clipsToFrame({
    "filename": "output/test_grid3.png",
    "width": 100,
    "height": 100,
    "clips": [{
        "width": 99.5,
        "height": 99.5,
        "x": 0.0,
        "y": 0.0,
        "index": 0,
        "framePixelData": [np.array([[[255,0,0]]])]
    }],
    "overwrite": True,
    "debug": True
})

# Fractional clip size and fractional offset combined.
clipsToFrame({
    "filename": "output/test_grid4.png",
    "width": 100,
    "height": 100,
    "clips": [{
        "width": 99.5,
        "height": 99.5,
        "x": 0.5,
        "y": 0.5,
        "index": 0,
        "framePixelData": [np.array([[[255,0,0]]])]
    }],
    "overwrite": True,
    "debug": True
})

print("Done.")
|
{"hexsha": "2f6e6843c220eddc3fc955a146b36e8d772aaf8c", "size": 2513, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/grid.py", "max_stars_repo_name": "skratchdot/media-tools", "max_stars_repo_head_hexsha": "bca0c683fb637aeefda1c49454a118f809047d97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-12-09T07:56:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-03T01:45:53.000Z", "max_issues_repo_path": "tests/grid.py", "max_issues_repo_name": "skratchdot/media-tools", "max_issues_repo_head_hexsha": "bca0c683fb637aeefda1c49454a118f809047d97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-04-29T00:00:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-09T14:24:19.000Z", "max_forks_repo_path": "tests/grid.py", "max_forks_repo_name": "skratchdot/media-tools", "max_forks_repo_head_hexsha": "bca0c683fb637aeefda1c49454a118f809047d97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-04-27T15:36:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-29T17:52:35.000Z", "avg_line_length": 25.9072164948, "max_line_length": 131, "alphanum_fraction": 0.6076402706, "include": true, "reason": "import numpy", "num_tokens": 749}
|
"""Classes for creating and augmenting Octrees"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from ocnn.dataset.data_processor import DataProcessor
from ocnn.octree._octree import Octree
from ocnn.octree._octree import OctreeInfo
from ocnn.octree._octree import Points
class OctreeProcessor(DataProcessor):
    """Turns points files into augmented Octree objects."""

    def __init__(self, octree_settings, augmentor_collection=None):
        """Set up the processor.

        Args:
            octree_settings: OctreeSettings object with octree build options.
            augmentor_collection: AugmentorCollection object, or None.
        """
        self.octree_settings = octree_settings
        super(OctreeProcessor, self).__init__(augmentor_collection)

    def process(self, file_path, aug_index):
        """Build an octree from the points file at file_path.

        Args:
            file_path: Path to points file.
            aug_index: Augmentation index of total augmentations.

        Returns:
            An Octree built from the (augmented) point cloud.
        """
        points = Points(file_path)

        # Seed the octree metadata from the configured build settings.
        cfg = self.octree_settings
        info = OctreeInfo()
        info.initialize(cfg.depth,
                        cfg.full_depth,
                        cfg.node_displacement,
                        cfg.node_feature,
                        cfg.split_label,
                        cfg.adaptive,
                        cfg.adaptive_depth,
                        cfg.threshold_distance,
                        cfg.threshold_normal,
                        cfg.key2xyz,
                        points)

        # Apply the configured augmentations, then record the resulting bounds.
        self.augmentor_collection.augment(points, aug_index)
        radius, center = points.get_points_bounds()
        info.set_bbox(radius, center)
        return Octree(info, points)
|
{"hexsha": "d8fa3614417ccaf439c31ea74c5da278582f67cc", "size": 1847, "ext": "py", "lang": "Python", "max_stars_repo_path": "ocnn/octree/python/ocnn/octree/octree_processor.py", "max_stars_repo_name": "FrozenSilent/O-CNN", "max_stars_repo_head_hexsha": "9527cd7670856229dfc3281bc05d2077a0553ec3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-05-18T12:41:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T10:02:47.000Z", "max_issues_repo_path": "ocnn/octree/python/ocnn/octree/octree_processor.py", "max_issues_repo_name": "FrozenSilent/O-CNN", "max_issues_repo_head_hexsha": "9527cd7670856229dfc3281bc05d2077a0553ec3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ocnn/octree/python/ocnn/octree/octree_processor.py", "max_forks_repo_name": "FrozenSilent/O-CNN", "max_forks_repo_head_hexsha": "9527cd7670856229dfc3281bc05d2077a0553ec3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2037037037, "max_line_length": 67, "alphanum_fraction": 0.7000541419, "include": true, "reason": "import numpy", "num_tokens": 393}
|
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# MODEL POM - Princeton Ocean Model
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
#
# # ROUTINE: Profq
#
# DESCRIPTION
#
# This subroutine solves for the turbulent closure.
# Turbulent kinetic energy (Q2/2)
# Turbulent length scale (Q2l)
#
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
import numpy as np
from decimal import *
def PROFQ(DT2):
from BFM17_POM1D_VrsFnl.src.BFM.General.ModuleGlobalMem import RLEN
from BFM17_POM1D_VrsFnl.src.pom.phys.POMModule import H, A, C, KB, KQ, DZZ, DZ, VH, VHP, \
WUSURF, WVSURF, WUBOT, WVBOT, Q2F, S, T, Q2LB, RHO, DTEF, SPROD, KM, U, V, BPROD, PROD, \
Q2LF, Z, L, SH, SM, KN, KH, GM, GH, ZZ, Q2B, Q2, UMOL
getcontext().prec = 12 # 12-digit precision (ilong)
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# SCALAR ARGUMENTS
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# DT2 = Decimal()
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# LOCAL SCALARS
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
A1, A2, B1, B2, C1, CIWC, COEF1, COEF2, COEF3, COEF4, COEF5, CONST1, \
DH, E1, E2, E3, GEE, KAPPA, P, SMALL, SQ, ZCO, COEF6 = Decimal()
K, KI = Decimal()
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# LOCAL ARRAYS
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
BOYGR, CC, TEMP1, TEMP2, TEMP3 = np.empty(KB,dtype=float)
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# DATA STATEMENTS
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
A1 = 0.92
B1 = 16.6
A2 = 0.74
B2 = 10.1
C1 = 0.08
E1 = 1.8
E2 = 1.33
E3 = 1.0
KAPPA = 0.40
SQ = 0.2
CIWC = 1.0
GEE = 9.806
SMALL = 1.E-8
# SM = KB*0.39
# SH = KB*0.49
# GM = KB*0.154
# GH = KB*0.154
DH = H
for K in range(1, KB - 1):
A[K] = -DT2 * (KQ[K + 1] + KQ[K] + 2 * UMOL) * 0.5 / (DZZ[K - 1] * DZ[K] * DH * DH)
C[K] = -DT2 * (KQ[K - 1] + KQ[K] + 2 * UMOL) * 0.5 / (DZZ[K - 1] * DZ[K - 1] * DH * DH)
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# THE FOLLOWING SECTION SOLVES FOR THE EQUATION
# DT2*(KQ*Q2')' - Q2*(2.*DT2*DTEF+1.) = -Q2B
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
CONST1 = 16.6 ** 0.6666667 * CIWC
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# BOUNDARY CONDITIONS
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
VH[0] = 0.0
VHP[0] = np.sqrt(WUSURF ** 2 + WVSURF ** 2) * CONST1
Q2F[KB - 1] = 0.5 * np.sqrt((WUBOT + WUBOT) ** 2 + (WVBOT + WVBOT) ** 2) * CONST1
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# CALCULATE PRESSURE IN UNITS OF DECIBARS
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# CC(K) = 1449.2 + 1.34* (S(K)-35.) + 4.55*T(K) - 0.045*T(K)**2 + 0.00821*P + (15.0**1.e-9*P**2)
# TEMP1(K) = 2./CC(K)
# TEMP2(K) = (0.00821*P)
# TEMP3(K) = (1.-0.40* (P/CC(K)**2))
for K in range(0, KB - 1):
CC[K] = CC[K] * (1. - TEMP1[K] * (TEMP2[K] + 15. * 1.e-9 * P ** 2) * TEMP3[K]) ** (-0.5)
P = -GEE * 1.025 * ZZ[K] * DH * .1
CC[K] = 1449.1 + .00821 * P + 4.55 * T[K] - .045 * T[K] ** 2 + 1.34 * (S[K] - 35.)
CC[K] = CC[K] / np.sqrt((1. - .01642 * P / CC[K]) * (1. - 0.40 * P / CC[K] ** 2))
for K in range(1, KB - 1):
Q2B[K] = np.abs(Q2B[K])
Q2LB[K] = np.abs(Q2LB[K])
BOYGR[K] = GEE * (RHO[K - 1] - RHO[K]) / (
DZZ[K - 1] * DH) # & (G) +GEE ** 2 * 2. * 1.025 / (CC(K - 1) ** 2 + CC(K) ** 2)(G)
DTEF[K] = Q2B[K] * np.sqrt(Q2B[K]) / (B1 * Q2LB[K] + SMALL)
SPROD[K] = .25 * KM[K] * ((U[K] + U[K] - U[K - 1] - U[K - 1]) ** 2 + (V[K] + V[K] - V[K - 1] - V[K - 1]) ** 2) / (DZZ[K - 1] * DH) ** 2 * CIWC ** 2
BPROD[K] = KH[K] * BOYGR[K]
PROD[K] = SPROD[K] + BPROD[K]
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# SWEEP DOWNWARD
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
for K in range(1, KB - 1):
VHP[K] = 1. / (A[K] + C[K] * (1. - VH[K - 1]) - (2. * DT2 * DTEF[K] + 1.))
VH[K] = A[K] * VHP[K]
VHP[K] = (-2. * DT2 * PROD[K] + C[K] * VHP[K - 1] - Q2B[K]) * VHP[K]
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# SWEEP UPWARD
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
for K in range(0, KB - 1): # 104
KI = KB - K
Q2F[KI] = VH[KI] * Q2F[KI + 1] + VHP[KI]
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# THE FOLLOWING SEECTION SOLVES FOR TEH EQUATION
# DT2(KQ*Q2L')' - Q2L*(DT2*DTEF+1.) = -Q2LB
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# BOUNDARY CONDITIONS
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
VH[0] = 0.
VHP[0] = 0.
Q2LF[KB - 1] = 0.
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# SWEEP DOWNWARD
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
for K in range(1, KB - 1):
DTEF[K] = DTEF[K] * (
1. + E2 * ((1. / np.abs(Z[K] - Z[0]) + 1. / np.abs(Z[K] - Z[KB])) * L[K] / (DH * KAPPA)) ** 2)
VHP[K] = 1. / (A[K] + C[K] * (1. - VH[K - 1]) - (DT2 * DTEF[K] + 1.))
VH[K] = A[K] * VHP[K]
VHP[K] = (DT2 * (- (SPROD[K] + E3 * BPROD[K]) * L[K] * E1) + C[K] * VHP[K - 1] - Q2LB[K]) * VHP[K]
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# SWEEP UPWARD
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
for K in range(0, KB - 1):
KI = KB - K
Q2LF[KI] = VH[KI] * Q2LF[KI + 1] + VHP[KI]
for K in range(1, KB - 1):
if Q2F[K] > SMALL or Q2LF[K] > SMALL:
break
Q2F[K] = SMALL
Q2LF[K] = SMALL
for K in range(0, KB - 1):
Q2F[K] = np.abs(Q2F[K])
Q2LF[K] = np.abs(Q2LF[K])
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# THE FOLLOWING SECTION SOLVES FOR KM AND KH
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
COEF1 = A2 * (1. - 6. * A1 / B1)
COEF2 = 3. * A2 * B2 + 18. * A1 * A2
COEF3 = A1 * (1. - 3. * C1 - 6. * A1 / B1)
COEF4 = 18. * A1 * A1 + 9. * A1 * A2
COEF5 = 9. * A1 * A2
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# NOTE THAT SM AND SH LIMIT TO INFINITY WHEN GH APPROACHES 0.0288
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
L[0] = 0.
L[KB - 1] = 0.
GH[0] = 0.
GH[KB - 1] = 0.
for K in range(1, KB - 1):
L[K] = Q2LF[K] / Q2F[K]
GH[K] = L[K] ** 2 / Q2F[K] * BOYGR[K]
# Cap GH and evaluate the Mellor-Yamada stability functions SM and SH.
# The .028 cap keeps GH safely below the 0.0288 singularity noted above,
# where SM and SH would diverge.
for K in range(0, KB):
    GH[K] = np.minimum(GH[K], .028)  # fixed: np.mininimum (typo) -> np.minimum
    SH[K] = COEF1 / (1. - COEF2 * GH[K])
    SM[K] = COEF3 + SH[K] * COEF4 * GH[K]  # fixed: SH(K) called the array; must index SH[K]
    SM[K] = SM[K] / (1. - COEF5 * GH[K])
for K in range(0, KB):
KN[K] = L[K] * np.sqrt(np.abs(Q2[K]))
KQ[K] = (KN[K] * .41 * SM[K] + KQ[K]) * .5
# KQ[K]= (KN[K] * .41 * SH[K] + KQ[K]) * .5
KM[K] = (KN[K] * SM[K] + KM[K]) * .5
KH[K] = (KN[K] * SH[K] + KH[K]) * .5
return
# EOC
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# MODEL POM - Princeton Ocean Model
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
{"hexsha": "0cd8ca23284f257a9d4002157f1f6b449b2c2262", "size": 9597, "ext": "py", "lang": "Python", "max_stars_repo_path": "BFM17_POM1D_VrsFnl/src/pom/phys/profq1d.py", "max_stars_repo_name": "kyleniemeyer/pyPOM1D", "max_stars_repo_head_hexsha": "4eeb1ca16abe07d039c634f2338ebac395692362", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "BFM17_POM1D_VrsFnl/src/pom/phys/profq1d.py", "max_issues_repo_name": "kyleniemeyer/pyPOM1D", "max_issues_repo_head_hexsha": "4eeb1ca16abe07d039c634f2338ebac395692362", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BFM17_POM1D_VrsFnl/src/pom/phys/profq1d.py", "max_forks_repo_name": "kyleniemeyer/pyPOM1D", "max_forks_repo_head_hexsha": "4eeb1ca16abe07d039c634f2338ebac395692362", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.7, "max_line_length": 155, "alphanum_fraction": 0.2492445556, "include": true, "reason": "import numpy", "num_tokens": 3005}
|
import argparse
import numpy as np
from sta663_project_lda.visualization.demo_topics import topic_viz
class LDASVI(object):
def __init__(self, datadir, K, alpha0=None, gamma0=None,
MB=256, kappa=0.5, tau0=256, eps=1e-3):
self.wordcnt_mat = np.load(datadir) # word-count matrix
self.vocab_size = self.wordcnt_mat.shape[0] # number of words in vocabulary
self.D = self.wordcnt_mat.shape[1] # number of documents
self.K = K # number of topics
self.alpha0 = 1/self.K if alpha0==None else alpha0
self.gamma0 = 1/self.K if gamma0==None else gamma0
self.MB = MB # mini-batch size
self.epoch_len = self.D // self.MB # number of iterations in each epoch
self.kappa = kappa # learning rate parameters
self.tau0 = tau0 # learningg rate parameters
self.eps = eps # criterion of convergence for local updates
def train_numpy(self, epoch=10, seed=0, printcycle=10):
import numpy as np
from scipy.special import psi
np.random.seed(seed)
gamma = np.random.rand(self.vocab_size, self.K) # initialization of topics
phi = {} # topic assignments
for ep in range(epoch):
order = np.random.permutation(self.D)
for t in range(self.epoch_len):
itr = ep * self.epoch_len + t
if printcycle!=0 and itr%printcycle==0:
print('starting iteration %i'%itr)
'''E-Step: update local variational parameters(phi,alpha) till convergent'''
sample_id = order[t*self.MB:(t+1)*self.MB]
alpha = np.ones((self.K, self.MB)) # initialization of topic proportions
psi_sum_gam = psi(np.sum(gamma, axis=0))
diff = self.eps + 1
while(diff>self.eps):
diff = 0
for i in range(self.MB):
tmp_id = np.nonzero(self.wordcnt_mat[:, sample_id[i]])[0]
tmp_cnt = self.wordcnt_mat[tmp_id, sample_id[i]]
# update topic assignement for each word in each document
tmp_phi = np.exp(psi(gamma[tmp_id,:]) - psi_sum_gam + psi(alpha[:,i]) - psi(np.sum(alpha[:,i])))
phi[i] = tmp_phi / np.reshape(np.sum(tmp_phi,axis=1), (-1,1))
# update topic proportion for each document
tmp_alpha = self.alpha0 + tmp_cnt[None,:] @ phi[i]
# accumulate diff to decide local convergence
diff += np.sum(np.abs(tmp_alpha-alpha[:,i]))
alpha[:,i] = tmp_alpha
diff = diff / self.K / self.MB
'''M-Step: update global variational parameters(gamma)'''
tmp_gamma = np.zeros((self.vocab_size, self.K))
for i in range(self.MB):
tmp_id = np.nonzero(self.wordcnt_mat[:, sample_id[i]])[0]
tmp_cnt = self.wordcnt_mat[tmp_id, sample_id[i]]
tmp_gamma[tmp_id, :] += phi[i] * tmp_cnt[:,None]
tmp_gamma = self.gamma0 + tmp_gamma * self.D / self.MB
rho_t = (self.tau0 + itr)**(-self.kappa)
gamma = (1-rho_t)*gamma + rho_t*tmp_gamma
return gamma # no need to return alpha, since alpha only includes topic proportion of a mini-batch of documents
if __name__ == '__main__':
    # Command-line driver: train LDA with SVI on a saved word-count matrix,
    # then display the top words of each learned topic.
    # fixed: help-text typo "Paramters" -> "Parameters".
    parser = argparse.ArgumentParser(description='Stochastic Variational Inference Training Parameters.')
    parser.add_argument('--datadir', dest='datadir', action='store',
                        help='Path of the training data', default='./data/toydata_mat.npy')
    parser.add_argument('-K', dest='K', type=int,
                        help='Number of Topics', default=2)
    parser.add_argument('--MB', dest='MB', type=int,
                        help='minibatch size', default=20)
    args = parser.parse_args()
    lda = LDASVI(args.datadir, args.K, MB=args.MB)
    gamma = lda.train_numpy(epoch=50)
    # Visualize the learned topic-word parameters against the vocabulary.
    vocabulary = np.load('./data/toydata_voc.npy')
    topic_viz(gamma, vocabulary, topk=5)
|
{"hexsha": "d071ce4899ff1329f18e0657afdc4a2362c786c1", "size": 4238, "ext": "py", "lang": "Python", "max_stars_repo_path": "sta663_project_lda/algorithms/lda_svi.py", "max_stars_repo_name": "haofuml/sta663_project_lda", "max_stars_repo_head_hexsha": "d9d0253f61996fef48e9909aecf583e70e318aff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-05-18T13:37:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-19T07:29:47.000Z", "max_issues_repo_path": "sta663_project_lda/algorithms/lda_svi.py", "max_issues_repo_name": "haofuml/sta663_project_lda", "max_issues_repo_head_hexsha": "d9d0253f61996fef48e9909aecf583e70e318aff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sta663_project_lda/algorithms/lda_svi.py", "max_forks_repo_name": "haofuml/sta663_project_lda", "max_forks_repo_head_hexsha": "d9d0253f61996fef48e9909aecf583e70e318aff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.8588235294, "max_line_length": 120, "alphanum_fraction": 0.5646531383, "include": true, "reason": "import numpy,from scipy", "num_tokens": 985}
|
[STATEMENT]
lemma subst_poly_scaleRat: "subst_poly \<sigma> (r *R p) = r *R (subst_poly \<sigma> p)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. subst_poly \<sigma> (r *R p) = r *R subst_poly \<sigma> p
[PROOF STEP]
by (rule linear_poly_eqI, unfold valuate_scaleRat valuate_subst_poly, simp)
|
{"llama_tokens": 122, "file": "Farkas_Farkas", "length": 1}
|
import re
import numpy as np
def clean(rows):
    """Clean JSON records for each race.

    Parameters
    ----------
    rows : list of dict
        Raw result rows.

    Returns
    -------
    list of dict
        Each row passed through ``handle_missing`` then ``process_rider``.
        Materialized as a list: the previous ``map``-based version returned a
        single-use lazy iterator under Python 3, which silently yields nothing
        on a second traversal.
    """
    return [process_rider(handle_missing(row)) for row in rows]
def handle_missing(row):
    """Remove the Place column from a row if the result was a DNF/DNP/DQ.

    Uses ``dict.get``/``dict.pop`` with defaults so rows missing any of the
    flag columns or the Place column no longer raise ``KeyError``.
    """
    if any(row.get(col) == 1 for col in ('IsDnf', 'IsDNP', 'IsDQ')):
        row.pop('Place', None)
    return row
def process_rider(row):
    """Preprocess fields tied to an individual rider.

    - Combines FirstName and LastName into ``Name``.
    - Returns an empty dict (dropping the row) when the racer has a known-bad
      RacerID, a missing/"Unknown" first name, or digits in either name.
    - Consolidates the two age columns into ``Age``.
    """
    first = row['FirstName']
    last = row['LastName']
    # Known records with missing/garbled names - there may be more!
    bad_ids = (3288, 61706, 832, 351)
    if (row['RacerID'] in bad_ids
            or not first
            or first == 'Unknown'
            or re.search(r'[\d]', first)
            or re.search(r'[\d]', last)):
        return {}
    # Combine names.
    row['Name'] = ' '.join([first, last])
    # Combine age columns; `or 0` maps None/missing values to 0 before max().
    row['Age'] = max(row['CalculatedAge'] or 0, row['ReportedAge'] or 0)
    return row
|
{"hexsha": "4abc7b1a6fcda1b1fb8af3e6369691201367475d", "size": 1184, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocess.py", "max_stars_repo_name": "physinet/road-results", "max_stars_repo_head_hexsha": "a55d9a54f9fc7b6e854de30777762df717d39d97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-12-17T23:58:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-11T01:56:14.000Z", "max_issues_repo_path": "preprocess.py", "max_issues_repo_name": "physinet/road-results", "max_issues_repo_head_hexsha": "a55d9a54f9fc7b6e854de30777762df717d39d97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "preprocess.py", "max_forks_repo_name": "physinet/road-results", "max_forks_repo_head_hexsha": "a55d9a54f9fc7b6e854de30777762df717d39d97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6, "max_line_length": 75, "alphanum_fraction": 0.6021959459, "include": true, "reason": "import numpy", "num_tokens": 326}
|
/*
* Copyright 2014 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <thrift/lib/cpp/async/TAsyncSocket.h>
#include <thrift/lib/cpp/protocol/TBinaryProtocol.h>
#include <thrift/lib/cpp/transport/THttpServer.h>
#include <thrift/lib/cpp/util/ScopedServerThread.h>
#include <thrift/lib/cpp/util/TThreadedServerCreator.h>
#include <thrift/lib/cpp2/test/gen-cpp/TestService.h>
#include <thrift/lib/cpp2/test/gen-cpp2/TestService.h>
#include <thrift/lib/cpp2/async/HeaderClientChannel.h>
#include <boost/lexical_cast.hpp>
using namespace apache::thrift;
using namespace apache::thrift::async;
using namespace apache::thrift::protocol;
using namespace apache::thrift::test;
using namespace apache::thrift::test::cpp2;
using namespace apache::thrift::transport;
using namespace apache::thrift::util;
using std::string;
class TestServiceHandler : public TestServiceIf {
public:
void sendResponse(string& _return, int64_t size) override {
_return = "test" + boost::lexical_cast<std::string>(size);
}
void noResponse(int64_t size) override { usleep(size); }
void echoRequest(string& _return, const string& req) override {
_return = req + "ccccccccccccccccccccccccccccccccccccccccccccc";
}
void serializationTest(string& _return, bool inEventBase) override {
_return = string(4096, 'a');
}
void eventBaseAsync(string& _return) override { _return = "hello world"; }
void notCalledBack() override {}
void voidResponse() override {}
};
std::unique_ptr<ScopedServerThread> createHttpServer() {
auto handler = std::make_shared<TestServiceHandler>();
auto processor = std::make_shared<TestServiceProcessor>(handler);
std::shared_ptr<TTransportFactory> transportFactory =
std::make_shared<THttpServerTransportFactory>();
std::shared_ptr<TProtocolFactory> protocolFactory =
std::make_shared<TBinaryProtocolFactoryT<THttpServer>>();
TThreadedServerCreator serverCreator(processor,
0,
transportFactory,
protocolFactory);
return folly::make_unique<ScopedServerThread>(&serverCreator);
}
// End-to-end test of HeaderClientChannel in HTTP-client mode against the
// threaded binary-over-HTTP server: one call with an argument, then one
// no-argument call on the same channel.
TEST(HeaderClientChannelHttpTest, SimpleTest) {
  std::unique_ptr<ScopedServerThread> serverThread = createHttpServer();
  TEventBase eb;
  const folly::SocketAddress* addr = serverThread->getAddress();
  std::shared_ptr<TAsyncSocket> socket = TAsyncSocket::newSocket(&eb, *addr);
  std::unique_ptr<HeaderClientChannel, TDelayedDestruction::Destructor> channel(
      new HeaderClientChannel(socket));
  // Switch the channel to HTTP framing; host and URI values are arbitrary
  // for this test server.
  channel->useAsHttpClient("127.0.0.1", "meh");
  TestServiceAsyncClient client(std::move(channel));
  client.sendResponse(
      [] (apache::thrift::ClientReceiveState&& state) {
        // Log the exception (if any) before the assertion so failures carry
        // a useful message.
        if (state.exception()) {
          try {
            std::rethrow_exception(state.exception());
          } catch (const std::exception& e) {
            LOG(INFO) << e.what();
          }
        }
        EXPECT_TRUE(state.exception() == nullptr);
        std::string res;
        TestServiceAsyncClient::recv_sendResponse(res, state);
        EXPECT_EQ(res, "test24");
      },
      24);
  // Drive the event base until the callback has run.
  eb.loop();
  client.eventBaseAsync(
      [] (apache::thrift::ClientReceiveState&& state) {
        EXPECT_TRUE(state.exception() == nullptr);
        std::string res;
        TestServiceAsyncClient::recv_eventBaseAsync(res, state);
        EXPECT_EQ(res, "hello world");
      });
  eb.loop();
}
// Same setup as SimpleTest, but the response payload is 4 KB to exercise
// HTTP body handling beyond a trivially small message.
TEST(HeaderClientChannel, LongResponse) {
  std::unique_ptr<ScopedServerThread> serverThread = createHttpServer();
  TEventBase eb;
  const folly::SocketAddress* addr = serverThread->getAddress();
  std::shared_ptr<TAsyncSocket> socket = TAsyncSocket::newSocket(&eb, *addr);
  std::unique_ptr<HeaderClientChannel, TDelayedDestruction::Destructor> channel(
      new HeaderClientChannel(socket));
  channel->useAsHttpClient("127.0.0.1", "meh");
  TestServiceAsyncClient client(std::move(channel));
  client.serializationTest(
      [] (apache::thrift::ClientReceiveState&& state) {
        EXPECT_TRUE(state.exception() == nullptr);
        std::string res;
        TestServiceAsyncClient::recv_serializationTest(res, state);
        // The handler returns 4096 'a' characters.
        EXPECT_EQ(res, string(4096, 'a'));
      },
      true);
  eb.loop();
}
// Standard gtest entry point.
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  int rc = RUN_ALL_TESTS();
  return rc;
}
|
{"hexsha": "742ccc0d27c7e2d8c5a8418d7c1d95b1fce54432", "size": 4853, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "thrift/lib/cpp2/test/HeaderClientChannelHttpTest.cpp", "max_stars_repo_name": "project-zerus/fbthrift", "max_stars_repo_head_hexsha": "fc092e2b645def21482c1772250a97a7cd003cee", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2015-11-23T00:26:06.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-31T12:56:08.000Z", "max_issues_repo_path": "thrift/lib/cpp2/test/HeaderClientChannelHttpTest.cpp", "max_issues_repo_name": "project-zerus/fbthrift", "max_issues_repo_head_hexsha": "fc092e2b645def21482c1772250a97a7cd003cee", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2017-05-10T15:43:34.000Z", "max_issues_repo_issues_event_max_datetime": "2018-01-04T22:36:04.000Z", "max_forks_repo_path": "thrift/lib/cpp2/test/HeaderClientChannelHttpTest.cpp", "max_forks_repo_name": "project-zerus/fbthrift", "max_forks_repo_head_hexsha": "fc092e2b645def21482c1772250a97a7cd003cee", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2017-09-01T01:30:25.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-04T17:46:24.000Z", "avg_line_length": 34.6642857143, "max_line_length": 80, "alphanum_fraction": 0.7078096023, "num_tokens": 1149}
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
from scipy.stats import distributions
__all__ = ('Prior', 'UniformPrior', 'ExpPrior', 'InvGammaPrior', 'BetaPrior',
'LogPrior')
class Prior(object):
    """
    Convenience wrapper for prior distributions.  A Prior holds one or more
    frozen scipy.stats distributions and supports list-like concatenation
    (``+``) and repetition (``*``).

    dists: frozen scipy.stats distribution object(s)
    """
    def __init__(self, *dists):
        self._dists = list(dists)
        self._update()

    def __len__(self):
        return len(self._dists)

    def __iter__(self):
        return iter(self._dists)

    def __getitem__(self, key):
        return self._dists[key]

    def __add__(self, other):
        combined = self._dists + other._dists
        return Prior(*combined)

    def __mul__(self, other):
        repeated = self._dists * other
        return Prior(*repeated)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __iadd__(self, other):
        self._dists += other._dists
        self._update()
        return self

    def __imul__(self, other):
        self._dists *= other
        self._update()
        return self

    def _update(self):
        """
        Cache the support bounds (min, max) of every distribution.
        """
        bounds = np.array([dist.interval(1.) for dist in self])
        self._min, self._max = bounds.T

    def rvs(self, size=1):
        """
        Draw random samples; one column per distribution.
        """
        if len(self) == 1:
            return self[0].rvs(size)
        columns = [dist.rvs(size) for dist in self]
        return np.column_stack(columns)

    def logpdf(self, x):
        """
        Evaluate the joint log PDF at x (-inf outside the cached support).
        """
        if len(self) == 1:
            return self[0].logpdf(x)
        if np.any((x < self._min) | (x > self._max)):
            return -np.inf
        return sum(dist.logpdf(xi) for dist, xi in zip(self, x))

    def __getstate__(self):
        # Frozen scipy dists pickle via their (name, args, kwds) triples.
        return [(dist.dist.name, dist.args, dist.kwds) for dist in self]

    def __setstate__(self, state):
        self._dists = [getattr(distributions, name)(*args, **kwds)
                       for name, args, kwds in state]
        self._update()
def UniformPrior(low=0., high=1.):
    """
    Flat (constant-density) prior over a finite range.

    low, high : min, max of range
    """
    width = high - low
    return Prior(distributions.uniform(loc=low, scale=width))
def ExpPrior(rate=1.):
    """
    Exponential prior.

    rate : exponential rate parameter (inverse scale)
    """
    scale = 1. / rate
    return Prior(distributions.expon(scale=scale))
def InvGammaPrior(a=5., b=5.):
    """
    Inverse gamma prior, e.g. for a GP variance.

    a : gamma shape parameter
    b : scale parameter
    """
    dist = distributions.invgamma(a, scale=b)
    return Prior(dist)
class _beta_mod_gen(distributions.beta.__class__):
    """
    A beta distribution modified to work as a Bayesian prior.
    The scipy beta dist with b < 1 will generate random samples == 1, but then
    will also evaluate beta.pdf(1) == inf, which is probably formally correct
    but will definitely not work as a prior. It also doesn't make sense for a
    GP length scale to be exactly zero. As a workaround, hack the distribution
    to support (0, 1) exclusive instead of [0, 1] inclusive.
    """
    # small epsilon > 0, used to pull the support strictly inside (0, 1)
    _eps = 1e-4

    def _rvs(self, *args):
        # coerce random samples to (0, 1) exclusive via the affine map
        # y = eps + (1 - 2*eps) * x.
        # NOTE(review): _rvs is a scipy-private hook whose signature has
        # changed across scipy versions (newer versions pass size and
        # random_state explicitly) -- confirm against the pinned scipy.
        rvs = super(distributions.beta.__class__, self)._rvs(*args)
        rvs *= 1 - 2*self._eps
        rvs += self._eps
        return rvs
# Register the modified beta on the distributions module with support
# (eps, 1 - eps), mirroring how scipy exposes its own generators.
distributions.beta_mod = _beta_mod_gen(
    a=_beta_mod_gen._eps,
    b=(1.-_beta_mod_gen._eps),
    name='beta_mod'
)
def BetaPrior(a=1., b=0.1):
    """
    Beta prior, e.g. for GP length scales (correlation lengths).  Backed by
    the modified beta distribution registered on the module, whose support
    excludes the endpoints 0 and 1.

    a, b : beta shape parameters
    """
    dist = distributions.beta_mod(a, b)
    return Prior(dist)
class _log_gen(distributions.rv_continuous):
    """Log-uniform distribution on [a, 1] with (unnormalized) pdf 1/x."""

    def _rvs(self, a):
        # Sample uniformly in log space over [log(a), 0] and exponentiate.
        # NOTE(review): relies on the scipy-private attribute self._size,
        # which newer scipy versions removed from this hook -- confirm the
        # pinned scipy version supports it.
        return np.exp(np.random.uniform(np.log(a), 0, self._size))

    def _logpdf(self, x, a):
        return -np.log(x) # not normalized!
# Register the logarithmic (Jeffreys-type) generator; the tiny lower bound
# keeps log(x) finite.
distributions.logarithmic = _log_gen(a=1e-16, name='logarithmic', shapes='a')
def LogPrior(low=1e-8, high=1.):
    """
    Logarithmic (Jeffreys) prior, e.g. for the noise term (nugget).

    low, high : range of random samples
    Does not affect log probability.
    """
    ratio = low / high
    return Prior(distributions.logarithmic(ratio, scale=high))
|
{"hexsha": "4e3abf328c8f671bca7e608462a83cb3fcbc4d55", "size": 4322, "ext": "py", "lang": "Python", "max_stars_repo_path": "mtd/priors.py", "max_stars_repo_name": "jbernhard/mtd", "max_stars_repo_head_hexsha": "6326fdb44f071311ace7862371e658d609f43d08", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-02-21T21:20:07.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-03T15:54:13.000Z", "max_issues_repo_path": "mtd/priors.py", "max_issues_repo_name": "jbernhard/mtd", "max_issues_repo_head_hexsha": "6326fdb44f071311ace7862371e658d609f43d08", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mtd/priors.py", "max_forks_repo_name": "jbernhard/mtd", "max_forks_repo_head_hexsha": "6326fdb44f071311ace7862371e658d609f43d08", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.2808988764, "max_line_length": 79, "alphanum_fraction": 0.6052753355, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1144}
|
Require Import Coq.Classes.Morphisms.
Require Import Coq.Classes.RelationClasses.
Require Import Logic.lib.Ensembles_ext.
Require Import Logic.GeneralLogic.Base.
Require Import Logic.GeneralLogic.ProofTheory.TheoryOfSequentCalculus.
Require Import Logic.MinimumLogic.Syntax.
Local Open Scope logic_base.
Local Open Scope syntax.
(* Abstract properties a sequent calculus may satisfy over a minimum
   (implication-only) language: modus ponens and the two directions of the
   deduction theorem. *)
Section PropertiesOfSequentCalculus.

Context (L: Language)
        (Gamma: Derivable L)
        {minL: MinimumLanguage L}.

(* From Phi |- x and Phi |- x --> y, conclude Phi |- y. *)
Definition DeductionMP: Prop :=
  forall (Phi: context) (x y: expr), Phi |--- x -> Phi |--- x --> y -> Phi |--- y.

(* Implication introduction: discharge the extra hypothesis x. *)
Definition DeductionImpIntro: Prop :=
  forall (Phi: context) (x y: expr), Phi;; x |--- y -> Phi |--- x --> y.

(* Implication elimination: reinstate x as a hypothesis. *)
Definition DeductionImpElim: Prop :=
  forall (Phi: context) (x y: expr), Phi |--- x --> y -> Phi;; x |--- y.

End PropertiesOfSequentCalculus.
(* Derivations relating the properties above to the generic sequent-calculus
   properties from TheoryOfSequentCalculus. *)
Section TheoryOfSequentCalculus.

Context {L: Language}
        {Gamma: Derivable L}
        {minL: MinimumLanguage L}.

(* Modus ponens + assumption + weakening yield implication elimination. *)
Lemma DeductionMP_DerivableAssu_DeductionWeaken_2_DeductionImpElim:
  DeductionMP L Gamma ->
  DerivableAssu L Gamma ->
  DeductionWeaken L Gamma ->
  DeductionImpElim L Gamma.
Proof.
  intros.
  intros ? ? ? ?.
  eapply H.
  + apply H0.
    right.
    constructor.
  + eapply H1; [| exact H2].
    intros ? ?.
    left.
    auto.
Qed.

(* Implication introduction + modus ponens yield single-premise substitution. *)
Lemma DeductionImpIntro_DeductionMP_2_DeductionSubst1:
  DeductionImpIntro L Gamma ->
  DeductionMP L Gamma ->
  DeductionSubst1 L Gamma.
Proof.
  intros.
  intros ? ? ? ? ?.
  apply H in H2.
  revert H1 H2; apply H0.
Qed.

(* Conversely, implication elimination + substitution recover modus ponens. *)
Lemma DeductionImpElim_DeductionSubst1_2_DeductionMP:
  DeductionImpElim L Gamma ->
  DeductionSubst1 L Gamma ->
  DeductionMP L Gamma.
Proof.
  intros.
  intros ? ? ? ? ?.
  apply H in H2.
  revert H1 H2; apply H0.
Qed.

End TheoryOfSequentCalculus.
|
{"author": "QinxiangCao", "repo": "LOGIC", "sha": "d1476d57345c87447ea500b3d5ea99ee6d0f6863", "save_path": "github-repos/coq/QinxiangCao-LOGIC", "path": "github-repos/coq/QinxiangCao-LOGIC/LOGIC-d1476d57345c87447ea500b3d5ea99ee6d0f6863/MinimumLogic/ProofTheory/TheoryOfSequentCalculus.v"}
|
Probabilistic Programming
=====
and Bayesian Methods for Hackers
========
##### Version 0.1
`Original content created by Cam Davidson-Pilon`
`Ported to Python 3 and PyMC3 by Max Margenot (@clean_utensils) and Thomas Wiecki (@twiecki) at Quantopian (@quantopian)`
___
Welcome to *Bayesian Methods for Hackers*. The full Github repository is available at [github/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers](https://github.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers). The other chapters can be found on the project's [homepage](https://camdavidsonpilon.github.io/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/). We hope you enjoy the book, and we encourage any contributions!
Chapter 1
======
***
The Philosophy of Bayesian Inference
------
> You are a skilled programmer, but bugs still slip into your code. After a particularly difficult implementation of an algorithm, you decide to test your code on a trivial example. It passes. You test the code on a harder problem. It passes once again. And it passes the next, *even more difficult*, test too! You are starting to believe that there may be no bugs in this code...
If you think this way, then congratulations, you already are thinking Bayesian! Bayesian inference is simply updating your beliefs after considering new evidence. A Bayesian can rarely be certain about a result, but he or she can be very confident. Just like in the example above, we can never be 100% sure that our code is bug-free unless we test it on every possible problem; something rarely possible in practice. Instead, we can test it on a large number of problems, and if it succeeds we can feel more *confident* about our code, but still not certain. Bayesian inference works identically: we update our beliefs about an outcome; rarely can we be absolutely sure unless we rule out all other alternatives.
### The Bayesian state of mind
Bayesian inference differs from more traditional statistical inference by preserving *uncertainty*. At first, this sounds like a bad statistical technique. Isn't statistics all about deriving *certainty* from randomness? To reconcile this, we need to start thinking like Bayesians.
The Bayesian world-view interprets probability as a measure of *believability in an event*, that is, how confident we are in an event occurring. In fact, we will see in a moment that this is the natural interpretation of probability.
For this to be clearer, we consider an alternative interpretation of probability: *Frequentists*, who subscribe to the more *classical* version of statistics, assume that probability is the long-run frequency of events (hence the bestowed title). For example, the *probability of plane accidents* under a frequentist philosophy is interpreted as the *long-term frequency of plane accidents*. This makes logical sense for many probabilities of events, but becomes more difficult to understand when events have no long-term frequency of occurrences. Consider: we often assign probabilities to outcomes of presidential elections, but the election itself only happens once! Frequentists get around this by invoking alternative realities and saying that across all these realities, the frequency of occurrences defines the probability.
Bayesians, on the other hand, have a more intuitive approach. Bayesians interpret a probability as measure of *belief*, or confidence, of an event occurring. Simply, a probability is a summary of an opinion. An individual who assigns a belief of 0 to an event has no confidence that the event will occur; conversely, assigning a belief of 1 implies that the individual is absolutely certain of an event occurring. Beliefs between 0 and 1 allow for weightings of other outcomes. This definition agrees with the probability of a plane accident example, for having observed the frequency of plane accidents, an individual's belief should be equal to that frequency, excluding any outside information. Similarly, under this definition of probability being equal to beliefs, it is meaningful to speak about probabilities (beliefs) of presidential election outcomes: how confident are you candidate *A* will win?
Notice in the paragraph above, I assigned the belief (probability) measure to an *individual*, not to Nature. This is very interesting, as this definition leaves room for conflicting beliefs between individuals. Again, this is appropriate for what naturally occurs: different individuals have different beliefs of events occurring, because they possess different *information* about the world. The existence of different beliefs does not imply that anyone is wrong. Consider the following examples demonstrating the relationship between individual beliefs and probabilities:
- I flip a coin, and we both guess the result. We would both agree, assuming the coin is fair, that the probability of Heads is 1/2. Assume, then, that I peek at the coin. Now I know for certain what the result is: I assign probability 1.0 to either Heads or Tails (whichever it is). Now what is *your* belief that the coin is Heads? My knowledge of the outcome has not changed the coin's results. Thus we assign different probabilities to the result.
- Your code either has a bug in it or not, but we do not know for certain which is true, though we have a belief about the presence or absence of a bug.
- A medical patient is exhibiting symptoms $x$, $y$ and $z$. There are a number of diseases that could be causing all of them, but only a single disease is present. A doctor has beliefs about which disease, but a second doctor may have slightly different beliefs.
This philosophy of treating beliefs as probability is natural to humans. We employ it constantly as we interact with the world and only see partial truths, but gather evidence to form beliefs. Alternatively, you have to be *trained* to think like a frequentist.
To align ourselves with traditional probability notation, we denote our belief about event $A$ as $P(A)$. We call this quantity the *prior probability*.
John Maynard Keynes, a great economist and thinker, said "When the facts change, I change my mind. What do you do, sir?" This quote reflects the way a Bayesian updates his or her beliefs after seeing evidence. Even — especially — if the evidence is counter to what was initially believed, the evidence cannot be ignored. We denote our updated belief as $P(A |X )$, interpreted as the probability of $A$ given the evidence $X$. We call the updated belief the *posterior probability* so as to contrast it with the prior probability. For example, consider the posterior probabilities (read: posterior beliefs) of the above examples, after observing some evidence $X$:
1\. $P(A): \;\;$ the coin has a 50 percent chance of being Heads. $P(A | X):\;\;$ You look at the coin, observe a Heads has landed, denote this information $X$, and trivially assign probability 1.0 to Heads and 0.0 to Tails.
2\. $P(A): \;\;$ This big, complex code likely has a bug in it. $P(A | X): \;\;$ The code passed all $X$ tests; there still might be a bug, but its presence is less likely now.
3\. $P(A):\;\;$ The patient could have any number of diseases. $P(A | X):\;\;$ Performing a blood test generated evidence $X$, ruling out some of the possible diseases from consideration.
It's clear that in each example we did not completely discard the prior belief after seeing new evidence $X$, but we *re-weighted the prior* to incorporate the new evidence (i.e. we put more weight, or confidence, on some beliefs versus others).
By introducing prior uncertainty about events, we are already admitting that any guess we make is potentially very wrong. After observing data, evidence, or other information, we update our beliefs, and our guess becomes *less wrong*. This is the alternative side of the prediction coin, where typically we try to be *more right*.
### Bayesian Inference in Practice
If frequentist and Bayesian inference were programming functions, with inputs being statistical problems, then the two would be different in what they return to the user. The frequentist inference function would return a number, representing an estimate (typically a summary statistic like the sample average etc.), whereas the Bayesian function would return *probabilities*.
For example, in our debugging problem above, calling the frequentist function with the argument "My code passed all $X$ tests; is my code bug-free?" would return a *YES*. On the other hand, asking our Bayesian function "Often my code has bugs. My code passed all $X$ tests; is my code bug-free?" would return something very different: probabilities of *YES* and *NO*. The function might return:
> *YES*, with probability 0.8; *NO*, with probability 0.2
This is very different from the answer the frequentist function returned. Notice that the Bayesian function accepted an additional argument: *"Often my code has bugs"*. This parameter is the *prior*. By including the prior parameter, we are telling the Bayesian function to include our belief about the situation. Technically this parameter in the Bayesian function is optional, but we will see excluding it has its own consequences.
#### Incorporating evidence
As we acquire more and more instances of evidence, our prior belief is *washed out* by the new evidence. This is to be expected. For example, if your prior belief is something ridiculous, like "I expect the sun to explode today", and each day you are proved wrong, you would hope that any inference would correct you, or at least align your beliefs better. Bayesian inference will correct this belief.
Denote $N$ as the number of instances of evidence we possess. As we gather an *infinite* amount of evidence, say as $N \rightarrow \infty$, our Bayesian results (often) align with frequentist results. Hence for large $N$, statistical inference is more or less objective. On the other hand, for small $N$, inference is much more *unstable*: frequentist estimates have more variance and larger confidence intervals. This is where Bayesian analysis excels. By introducing a prior, and returning probabilities (instead of a scalar estimate), we *preserve the uncertainty* that reflects the instability of statistical inference of a small $N$ dataset.
One may think that for large $N$, one can be indifferent between the two techniques since they offer similar inference, and might lean towards the computationally-simpler, frequentist methods. An individual in this position should consider the following quote by Andrew Gelman (2005)[1], before making such a decision:
> Sample sizes are never large. If $N$ is too small to get a sufficiently-precise estimate, you need to get more data (or make more assumptions). But once $N$ is "large enough," you can start subdividing the data to learn more (for example, in a public opinion poll, once you have a good estimate for the entire country, you can estimate among men and women, northerners and southerners, different age groups, etc.). $N$ is never enough because if it were "enough" you'd already be on to the next problem for which you need more data.
### Are frequentist methods incorrect then?
**No.**
Frequentist methods are still useful or state-of-the-art in many areas. Tools such as least squares linear regression, LASSO regression, and expectation-maximization algorithms are all powerful and fast. Bayesian methods complement these techniques by solving problems that these approaches cannot, or by illuminating the underlying system with more flexible modeling.
#### A note on *Big Data*
Paradoxically, big data's predictive analytic problems are actually solved by relatively simple algorithms [2][4]. Thus we can argue that big data's prediction difficulty does not lie in the algorithm used, but instead on the computational difficulties of storage and execution on big data. (One should also consider Gelman's quote from above and ask "Do I really have big data?")
The much more difficult analytic problems involve *medium data* and, especially troublesome, *really small data*. Using a similar argument as Gelman's above, if big data problems are *big enough* to be readily solved, then we should be more interested in the *not-quite-big enough* datasets.
### Our Bayesian framework
We are interested in beliefs, which can be interpreted as probabilities by thinking Bayesian. We have a *prior* belief in event $A$, beliefs formed by previous information, e.g., our prior belief about bugs being in our code before performing tests.
Secondly, we observe our evidence. To continue our buggy-code example: if our code passes $X$ tests, we want to update our belief to incorporate this. We call this new belief the *posterior* probability. Updating our belief is done via the following equation, known as Bayes' Theorem, after its discoverer Thomas Bayes:
\begin{align}
P( A | X ) = & \frac{ P(X | A) P(A) } {P(X) } \\\\[5pt]
& \propto P(X | A) P(A)\;\; (\propto \text{is proportional to })
\end{align}
The above formula is not unique to Bayesian inference: it is a mathematical fact with uses outside Bayesian inference. Bayesian inference merely uses it to connect prior probabilities $P(A)$ with updated posterior probabilities $P(A | X )$.
##### Example: Mandatory coin-flip example
Every statistics text must contain a coin-flipping example, so I'll use it here to get it out of the way. Suppose, naively, that you are unsure about the probability of heads in a coin flip (spoiler alert: it's 50%). You believe there is some true underlying ratio, call it $p$, but have no prior opinion on what $p$ might be.
We begin to flip a coin, and record the observations: either $H$ or $T$. This is our observed data. An interesting question to ask is how our inference changes as we observe more and more data. More specifically, what do our posterior probabilities look like when we have little data, versus when we have lots of data?
Below we plot a sequence of updating posterior probabilities as we observe increasing amounts of data (coin flips).
```python
"""
The book uses a custom matplotlibrc file, which provides the unique styles for
matplotlib plots. If executing this book, and you wish to use the book's
styling, provided are two options:
1. Overwrite your own matplotlibrc file with the rc-file provided in the
book's styles/ dir. See http://matplotlib.org/users/customizing.html
2. Also in the styles is bmh_matplotlibrc.json file. This can be used to
update the styles in only this notebook. Try running the following code:
import json
s = json.load(open("../styles/bmh_matplotlibrc.json"))
matplotlib.rcParams.update(s)
"""
# The code below can be passed over, as it is currently not important, plus it
# uses advanced topics we have not covered yet. LOOK AT PICTURE, MICHAEL!
%matplotlib inline
from IPython.core.pylabtools import figsize
import numpy as np
from matplotlib import pyplot as plt
figsize(11, 9)
plt.style.use('ggplot')
import warnings
warnings.filterwarnings('ignore')
import scipy.stats as stats
dist = stats.beta
n_trials = [0, 1, 2, 3, 4, 5, 8, 15, 50, 500]
data = stats.bernoulli.rvs(0.5, size=n_trials[-1])
x = np.linspace(0, 1, 100)
# For the already prepared, I'm using Binomial's conj. prior.
for k, N in enumerate(n_trials):
sx = plt.subplot(len(n_trials)/2, 2, k+1)
plt.xlabel("$p$, probability of heads") \
if k in [0, len(n_trials)-1] else None
plt.setp(sx.get_yticklabels(), visible=False)
heads = data[:N].sum()
y = dist.pdf(x, 1 + heads, 1 + N - heads)
plt.plot(x, y, label="observe %d tosses,\n %d heads" % (N, heads))
plt.fill_between(x, 0, y, color="#348ABD", alpha=0.4)
plt.vlines(0.5, 0, 4, color="k", linestyles="--", lw=1)
leg = plt.legend()
leg.get_frame().set_alpha(0.4)
plt.autoscale(tight=True)
plt.suptitle("Bayesian updating of posterior probabilities",
y=1.02,
fontsize=14)
plt.tight_layout()
```
The posterior probabilities are represented by the curves, and our uncertainty is proportional to the width of the curve. As the plot above shows, as we start to observe data our posterior probabilities start to shift and move around. Eventually, as we observe more and more data (coin-flips), our probabilities will tighten closer and closer around the true value of $p=0.5$ (marked by a dashed line).
Notice that the plots are not always *peaked* at 0.5. There is no reason it should be: recall we assumed we did not have a prior opinion of what $p$ is. In fact, if we observe quite extreme data, say 8 flips and only 1 observed heads, our distribution would look very biased *away* from lumping around 0.5 (with no prior opinion, how confident would you feel betting on a fair coin after observing 8 tails and 1 head?). As more data accumulates, we would see more and more probability being assigned at $p=0.5$, though never all of it.
The next example is a simple demonstration of the mathematics of Bayesian inference.
##### Example: Bug, or just sweet, unintended feature?
Let $A$ denote the event that our code has **no bugs** in it. Let $X$ denote the event that the code passes all debugging tests. For now, we will leave the prior probability of no bugs as a variable, i.e. $P(A) = p$.
We are interested in $P(A|X)$, i.e. the probability of no bugs, given our debugging tests $X$. To use the formula above, we need to compute some quantities.
What is $P(X | A)$, i.e., the probability that the code passes $X$ tests *given* there are no bugs? Well, it is equal to 1, since code with no bugs will pass all tests.
$P(X)$ is a little bit trickier: The event $X$ can be divided into two possibilities, event $X$ occurring even though our code *indeed has* bugs (denoted $\sim A\;$, spoken *not $A$*), or event $X$ without bugs ($A$). $P(X)$ can be represented as:
\begin{align}
P(X ) & = P(X \text{ and } A) + P(X \text{ and } \sim A) \\\\[5pt]
& = P(X|A)P(A) + P(X | \sim A)P(\sim A)\\\\[5pt]
& = P(X|A)p + P(X | \sim A)(1-p)
\end{align}
We have already computed $P(X|A)$ above. On the other hand, $P(X | \sim A)$ is subjective: our code can pass tests but still have a bug in it, though the probability there is a bug present is reduced. Note this is dependent on the number of tests performed, the degree of complication in the tests, etc. Let's be conservative and assign $P(X|\sim A) = 0.5$. Then
\begin{align}
P(A | X) & = \frac{1\cdot p}{ 1\cdot p +0.5 (1-p) } \\\\
& = \frac{ 2 p}{1+p}
\end{align}
This is the posterior probability. What does it look like as a function of our prior, $p \in [0,1]$?
```python
# Plot the posterior P(A|X) = 2p / (1 + p) as a function of the prior p.
figsize(12.5, 4)
prior_p = np.linspace(0, 1, 50)
posterior_p = 2 * prior_p / (1 + prior_p)
plt.plot(prior_p, posterior_p, color="#348ABD", lw=3)
# Highlight the specific prior (p = 0.2) discussed in the text.
plt.scatter(0.2, 2 * (0.2) / 1.2, s=140, c="#348ABD")
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.xlabel("Prior, $P(A) = p$")
plt.ylabel("Posterior, $P(A|X)$, with $P(A) = p$")
plt.title("Are there bugs in my code?");
```
We can see the biggest gains if we observe the $X$ tests passed when the prior probability, $p$, is low. Let's settle on a specific value for the prior. I'm a strong programmer (I think), so I'm going to give myself a realistic prior of 0.20, that is, there is a 20% chance that I write code bug-free. To be more realistic, this prior should be a function of how complicated and large the code is, but let's pin it at 0.20. Then my updated belief that my code is bug-free is 0.33.
Recall that the prior is a probability: $p$ is the prior probability that there *are no bugs*, so $1-p$ is the prior probability that there *are bugs*.
Similarly, our posterior is also a probability, with $P(A | X)$ the probability there is no bug *given we saw all tests pass*, hence $1-P(A|X)$ is the probability there is a bug *given all tests passed*. What does our posterior probability look like? Below is a chart of both the prior and the posterior probabilities.
```python
# Side-by-side bars: prior (p = 0.2 bug-free) vs. posterior after all tests
# pass (P(A|X) = 2p/(1+p) = 1/3 from the formula derived above).
figsize(12.5, 4)
colours = ["#348ABD", "#A60628"]
prior = [0.20, 0.80]
posterior = [1./3, 2./3]
# FIX: linewidth must be numeric -- lw="3" passed a string, which relies on
# implicit conversion that modern matplotlib no longer performs.
plt.bar([0, .7], prior, alpha=0.70, width=0.25,
        color=colours[0], label="prior distribution",
        lw=3, edgecolor=colours[0])
plt.bar([0 + 0.25, .7 + 0.25], posterior, alpha=0.7,
        width=0.25, color=colours[1],
        label="posterior distribution",
        lw=3, edgecolor=colours[1])
plt.xticks([0.20, .95], ["Bugs Absent", "Bugs Present"])
plt.title("Prior and Posterior probability of bugs present")
plt.ylabel("Probability")
plt.legend(loc="upper left");
```
Notice that after we observed $X$ occur, the probability of bugs being absent increased. By increasing the number of tests, we can approach confidence (probability 1) that there are no bugs present.
This was a very simple example of Bayesian inference and Bayes rule. Unfortunately, the mathematics necessary to perform more complicated Bayesian inference only becomes more difficult, except for artificially constructed cases. We will later see that this type of mathematical analysis is actually unnecessary. First we must broaden our modeling tools. The next section deals with *probability distributions*. If you are already familiar, feel free to skip (or at least skim), but for the less familiar the next section is essential.
_______
## Probability Distributions
**Let's quickly recall what a probability distribution is:** Let $Z$ be some random variable. Then associated with $Z$ is a *probability distribution function* that assigns probabilities to the different outcomes $Z$ can take. Graphically, a probability distribution is a curve where the probability of an outcome is proportional to the height of the curve. You can see examples in the first figure of this chapter.
We can divide random variables into three classifications:
- **$Z$ is discrete**: Discrete random variables may only assume values on a specified list. Things like populations, movie ratings, and number of votes are all discrete random variables. Discrete random variables become more clear when we contrast them with...
- **$Z$ is continuous**: Continuous random variables can take on arbitrarily exact values. For example, temperature, speed, time, color are all modeled as continuous variables because you can progressively make the values more and more precise.
- **$Z$ is mixed**: Mixed random variables assign probabilities to both discrete and continuous random variables, i.e. it is a combination of the above two categories.
### Discrete Case
If $Z$ is discrete, then its distribution is called a *probability mass function*, which measures the probability $Z$ takes on the value $k$, denoted $P(Z=k)$. Note that the probability mass function completely describes the random variable $Z$, that is, if we know the mass function, we know how $Z$ should behave. There are popular probability mass functions that consistently appear: we will introduce them as needed, but let's introduce the first very useful probability mass function. We say $Z$ is *Poisson*-distributed if:
$$P(Z = k) =\frac{ \lambda^k e^{-\lambda} }{k!}, \; \; k=0,1,2, \dots $$
$\lambda$ is called a parameter of the distribution, and it controls the distribution's shape. For the Poisson distribution, $\lambda$ can be any positive number. By increasing $\lambda$, we add more probability to larger values, and conversely by decreasing $\lambda$ we add more probability to smaller values. One can describe $\lambda$ as the *intensity* of the Poisson distribution.
Unlike $\lambda$, which can be any positive number, the value $k$ in the above formula must be a non-negative integer, i.e., $k$ must take on values 0,1,2, and so on. This is very important, because if you wanted to model a population you could not make sense of populations with 4.25 or 5.612 members.
If a random variable $Z$ has a Poisson mass distribution, we denote this by writing
$$Z \sim \text{Poi}(\lambda) $$
One useful property of the Poisson distribution is that its expected value is equal to its parameter, i.e.:
$$E\large[ \;Z\; | \; \lambda \;\large] = \lambda $$
We will use this property often, so it's useful to remember. Below, we plot the probability mass distribution for different $\lambda$ values. The first thing to notice is that by increasing $\lambda$, we add more probability of larger values occurring. Second, notice that although the graph ends at 15, the distributions do not. They assign positive probability to every non-negative integer.
```python
# Poisson pmf for two intensities; larger lambda shifts mass to larger k.
figsize(12.5, 4)

import scipy.stats as stats
a = np.arange(16)
poi = stats.poisson
lambda_ = [1.5, 4.25]
colours = ["#348ABD", "#A60628"]

# Raw strings keep "\lambda" literal -- without the r prefix this is an
# invalid escape sequence (SyntaxWarning on Python 3.12+).
# Also: linewidth is numeric (lw=3), not the string "3".
plt.bar(a, poi.pmf(a, lambda_[0]), color=colours[0],
        label=r"$\lambda = %.1f$" % lambda_[0], alpha=0.60,
        edgecolor=colours[0], lw=3)
plt.bar(a, poi.pmf(a, lambda_[1]), color=colours[1],
        label=r"$\lambda = %.1f$" % lambda_[1], alpha=0.60,
        edgecolor=colours[1], lw=3)

plt.xticks(a + 0.4, a)
plt.legend()
plt.ylabel("probability of $k$")
plt.xlabel("$k$")
plt.title(r"Probability mass function of a Poisson random variable; differing "
          r"$\lambda$ values");
```
### Continuous Case
Instead of a probability mass function, a continuous random variable has a *probability density function*. This might seem like unnecessary nomenclature, but the density function and the mass function are very different creatures. An example of continuous random variable is a random variable with *exponential density*. The density function for an exponential random variable looks like this:
$$f_Z(z | \lambda) = \lambda e^{-\lambda z }, \;\; z\ge 0$$
Like a Poisson random variable, an exponential random variable can take on only non-negative values. But unlike a Poisson variable, the exponential can take on *any* non-negative values, including non-integral values such as 4.25 or 5.612401. This property makes it a poor choice for count data, which must be an integer, but a great choice for time data, temperature data (measured in Kelvins, of course), or any other precise *and positive* variable. The graph below shows two probability density functions with different $\lambda$ values.
When a random variable $Z$ has an exponential distribution with parameter $\lambda$, we say *$Z$ is exponential* and write
$$Z \sim \text{Exp}(\lambda)$$
Given a specific $\lambda$, the expected value of an exponential random variable is equal to the inverse of $\lambda$, that is:
$$E[\; Z \;|\; \lambda \;] = \frac{1}{\lambda}$$
```python
# Exponential pdf for two rates. NOTE: scipy parameterizes the exponential
# by scale = 1/lambda, hence the 1./l below.
a = np.linspace(0, 4, 100)
expo = stats.expon
lambda_ = [0.5, 1]

for l, c in zip(lambda_, colours):
    # Raw string keeps "\lambda" literal (invalid escape otherwise;
    # SyntaxWarning on Python 3.12+).
    plt.plot(a, expo.pdf(a, scale=1./l), lw=3,
             color=c, label=r"$\lambda = %.1f$" % l)
    plt.fill_between(a, expo.pdf(a, scale=1./l), color=c, alpha=.33)

plt.legend()
plt.ylabel("PDF at $z$")
plt.xlabel("$z$")
plt.ylim(0, 1.2)
plt.title(r"Probability density function of an Exponential random variable; "
          r"differing $\lambda$");
```
### But what is $\lambda \;$?
**This question is what motivates statistics**. In the real world, $\lambda$ is hidden from us. We see only $Z$, and must go backwards to try and determine $\lambda$. The problem is difficult because there is no one-to-one mapping from $Z$ to $\lambda$. Many different methods have been created to solve the problem of estimating $\lambda$, but since $\lambda$ is never actually observed, no one can say for certain which method is best!
Bayesian inference is concerned with *beliefs* about what $\lambda$ might be. Rather than try to guess $\lambda$ exactly, we can only talk about what $\lambda$ is likely to be by assigning a probability distribution to $\lambda$.
This might seem odd at first. After all, $\lambda$ is fixed; it is not (necessarily) random! How can we assign probabilities to values of a non-random variable? Ah, we have fallen for our old, frequentist way of thinking. Recall that under Bayesian philosophy, we *can* assign probabilities if we interpret them as beliefs. And it is entirely acceptable to have *beliefs* about the parameter $\lambda$.
##### Example: Inferring behaviour from text-message data
Let's try to model a more interesting example, one that concerns the rate at which a user sends and receives text messages:
> You are given a series of daily text-message counts from a user of your system. The data, plotted over time, appears in the chart below. You are curious to know if the user's text-messaging habits have changed over time, either gradually or suddenly. How can you model this? (This is in fact my own text-message data. Judge my popularity as you wish.)
```python
# Load the daily text-message counts and show them as a bar chart.
figsize(12.5, 3.5)
count_data = np.loadtxt("data/txtdata.csv")
n_count_data = len(count_data)
days = np.arange(n_count_data)
plt.bar(days, count_data, color="#348ABD")
plt.xlabel("Time (days)")
plt.ylabel("count of text-msgs received")
plt.title("Did the user's texting habits change over time?")
plt.xlim(0, n_count_data);
```
Before we start modeling, see what you can figure out just by looking at the chart above. Would you say there was a change in behaviour during this time period?
How can we start to model this? Well, as we have conveniently already seen, a Poisson random variable is a very appropriate model for this type of *count* data. Denoting day $i$'s text-message count by $C_i$,
$$ C_i \sim \text{Poisson}(\lambda) $$
We are not sure what the value of the $\lambda$ parameter really is, however. Looking at the chart above, it appears that the rate might become higher late in the observation period, which is equivalent to saying that $\lambda$ increases at some point during the observations. (Recall that a higher value of $\lambda$ assigns more probability to larger outcomes. That is, there is a higher probability of many text messages having been sent on a given day.)
How can we represent this observation mathematically? Let's assume that on some day during the observation period (call it $\tau$), the parameter $\lambda$ suddenly jumps to a higher value. So we really have two $\lambda$ parameters: one for the period before $\tau$, and one for the rest of the observation period. In the literature, a sudden transition like this would be called a *switchpoint*:
$$
\lambda =
\begin{cases}
\lambda_1 & \text{if } t \lt \tau \cr
\lambda_2 & \text{if } t \ge \tau
\end{cases}
$$
If, in reality, no sudden change occurred and indeed $\lambda_1 = \lambda_2$, then the $\lambda$s posterior distributions should look about equal.
We are interested in inferring the unknown $\lambda$s. To use Bayesian inference, we need to assign prior probabilities to the different possible values of $\lambda$. What would be good prior probability distributions for $\lambda_1$ and $\lambda_2$? Recall that $\lambda$ can be any positive number. As we saw earlier, the *exponential* distribution provides a continuous density function for positive numbers, so it might be a good choice for modeling $\lambda_i$. But recall that the exponential distribution takes a parameter of its own, so we'll need to include that parameter in our model. Let's call that parameter $\alpha$.
\begin{align}
&\lambda_1 \sim \text{Exp}( \alpha ) \\\
&\lambda_2 \sim \text{Exp}( \alpha )
\end{align}
$\alpha$ is called a *hyper-parameter* or *parent variable*. In literal terms, it is a parameter that influences other parameters. Our initial guess at $\alpha$ does not influence the model too strongly, so we have some flexibility in our choice. A good rule of thumb is to set the exponential parameter equal to the inverse of the average of the count data. Since we're modeling $\lambda$ using an exponential distribution, we can use the expected value identity shown earlier to get:
$$\frac{1}{N}\sum_{i=0}^N \;C_i \approx E[\; \lambda \; |\; \alpha ] = \frac{1}{\alpha}$$
An alternative, and something I encourage the reader to try, would be to have two priors: one for each $\lambda_i$. Creating two exponential distributions with different $\alpha$ values reflects our prior belief that the rate changed at some point during the observations.
What about $\tau$? Because of the noisiness of the data, it's difficult to pick out a priori when $\tau$ might have occurred. Instead, we can assign a *uniform prior belief* to every possible day. This is equivalent to saying
\begin{align}
& \tau \sim \text{DiscreteUniform(1,70) }\\\\
& \Rightarrow P( \tau = k ) = \frac{1}{70}
\end{align}
So after all this, what does our overall prior distribution for the unknown variables look like? Frankly, *it doesn't matter*. What we should understand is that it's an ugly, complicated mess involving symbols only a mathematician could love. And things will only get uglier the more complicated our models become. Regardless, all we really care about is the posterior distribution.
We next turn to PyMC3, a Python library for performing Bayesian analysis that is undaunted by the mathematical monster we have created.
Introducing our first hammer: PyMC3
-----
PyMC3 is a Python library for programming Bayesian analysis [3]. It is a fast, well-maintained library. The only unfortunate part is that its documentation is lacking in certain areas, especially those that bridge the gap between beginner and hacker. One of this book's main goals is to solve that problem, and also to demonstrate why PyMC3 is so cool.
We will model the problem above using PyMC3. This type of programming is called *probabilistic programming*, an unfortunate misnomer that invokes ideas of randomly-generated code and has likely confused and frightened users away from this field. The code is not random; it is probabilistic in the sense that we create probability models using programming variables as the model's components. Model components are first-class primitives within the PyMC3 framework.
B. Cronin [5] has a very motivating description of probabilistic programming:
> Another way of thinking about this: unlike a traditional program, which only runs in the forward directions, a probabilistic program is run in both the forward and backward direction. It runs forward to compute the consequences of the assumptions it contains about the world (i.e., the model space it represents), but it also runs backward from the data to constrain the possible explanations. In practice, many probabilistic programming systems will cleverly interleave these forward and backward operations to efficiently home in on the best explanations.
Because of the confusion engendered by the term *probabilistic programming*, I'll refrain from using it. Instead, I'll simply say *programming*, since that's what it really is.
PyMC3 code is easy to read. The only novel thing should be the syntax. Simply remember that we are representing the model's components ($\tau, \lambda_1, \lambda_2$ ) as variables.
```python
import pymc3 as pm
import theano.tensor as tt

# Build the switchpoint model: both Poisson rates (lambda_1, lambda_2) get
# Exponential priors sharing the hyper-parameter alpha (set to the inverse
# of the sample mean, per the rule of thumb above), and tau gets a
# discrete-uniform prior over every observed day.
with pm.Model() as model:
    alpha = 1.0/count_data.mean()  # Recall count_data is the
                                   # variable that holds our txt counts
    lambda_1 = pm.Exponential("lambda_1", alpha)
    lambda_2 = pm.Exponential("lambda_2", alpha)
    tau = pm.DiscreteUniform("tau", lower=0, upper=n_count_data - 1)
```
In the code above, we create the PyMC3 variables corresponding to $\lambda_1$ and $\lambda_2$. We assign them to PyMC3's *stochastic variables*, so-called because they are treated by the back end as random number generators.
```python
with model:
    idx = np.arange(n_count_data)  # day indices 0 .. n_count_data-1
    # lambda_ equals lambda_1 for days before tau and lambda_2 from tau on.
    lambda_ = pm.math.switch(tau > idx, lambda_1, lambda_2)
```
```python
# Display lambda_'s repr -- a symbolic expression, not concrete numbers
# (its printed form appears below the cell).
lambda_
```
Elemwise{switch,no_inplace}.0
This code creates a new function `lambda_`, but really we can think of it as a random variable: the random variable $\lambda$ from above. The `switch()` function assigns `lambda_1` or `lambda_2` as the value of `lambda_`, depending on what side of `tau` we are on. The values of `lambda_` up until `tau` are `lambda_1` and the values afterwards are `lambda_2`.
Note that because `lambda_1`, `lambda_2` and `tau` are random, `lambda_` will be random. We are **not** fixing any variables yet.
```python
with model:
    # Tie the Poisson likelihood to the real data via the `observed` keyword.
    observation = pm.Poisson("obs", lambda_, observed=count_data)
```
The variable `observation` combines our data, `count_data`, with our proposed data-generation scheme, given by the variable `lambda_`, through the `observed` keyword.
The code below will be explained in Chapter 3, but I show it here so you can see where our results come from. One can think of it as a *learning* step. The machinery being employed is called *Markov Chain Monte Carlo* (MCMC), which I also delay explaining until Chapter 3. This technique returns thousands of random variables from the posterior distributions of $\lambda_1, \lambda_2$ and $\tau$. We can plot a histogram of the random variables to see what the posterior distributions look like. Below, we collect the samples (called *traces* in the MCMC literature) into histograms.
```python
### Mysterious code to be explained in Chapter 3.
with model:
    # Metropolis MCMC: 10000 kept draws after 5000 tuning steps (per chain).
    step = pm.Metropolis()
    trace = pm.sample(10000, tune=5000, step=step)
```
Multiprocess sampling (4 chains in 4 jobs)
CompoundStep
>Metropolis: [tau]
>Metropolis: [lambda_2]
>Metropolis: [lambda_1]
<div>
<style>
/* Turns off some styling */
progress {
/* gets rid of default border in Firefox and Opera. */
border: none;
/* Needs to be in here for Safari polyfill so background images work as expected. */
background-size: auto;
}
.progress-bar-interrupted, .progress-bar-interrupted::-webkit-progress-bar {
background: #F44336;
}
</style>
<progress value='60000' class='' max='60000' style='width:300px; height:20px; vertical-align: middle;'></progress>
100.00% [60000/60000 00:09<00:00 Sampling 4 chains, 0 divergences]
</div>
Sampling 4 chains for 5_000 tune and 10_000 draw iterations (20_000 + 40_000 draws total) took 19 seconds.
The number of effective samples is smaller than 25% for some parameters.
```python
# Pull the posterior samples (one 1-D array per unknown) out of the trace.
lambda_1_samples = trace['lambda_1']
lambda_2_samples = trace['lambda_2']
tau_samples = trace['tau']
```
```python
figsize(12.5, 10)
#histogram of the samples:
ax = plt.subplot(311)
ax.set_autoscaley_on(False)
plt.hist(lambda_1_samples, histtype='stepfilled', bins=30, alpha=0.85,
         label=r"posterior of $\lambda_1$", color="#A60628", density=True)
plt.legend(loc="upper left")
plt.title(r"""Posterior distributions of the variables
    $\lambda_1,\;\lambda_2,\;\tau$""")
plt.xlim([15, 30])
plt.xlabel(r"$\lambda_1$ value")
ax = plt.subplot(312)
ax.set_autoscaley_on(False)
plt.hist(lambda_2_samples, histtype='stepfilled', bins=30, alpha=0.85,
         label=r"posterior of $\lambda_2$", color="#7A68A6", density=True)
plt.legend(loc="upper left")
plt.xlim([15, 30])
plt.xlabel(r"$\lambda_2$ value")
plt.subplot(313)
w = 1.0 / tau_samples.shape[0] * np.ones_like(tau_samples)
plt.hist(tau_samples, bins=n_count_data, alpha=1,
label=r"posterior of $\tau$",
color="#467821", weights=w, rwidth=2.)
plt.xticks(np.arange(n_count_data))
plt.legend(loc="upper left")
plt.ylim([0, .75])
plt.xlim([35, len(count_data)-20])
plt.xlabel(r"$\tau$ (in days)")
plt.ylabel("probability")
plt.tight_layout()
```
### Interpretation
Recall that Bayesian methodology returns a *distribution*. Hence we now have distributions to describe the unknown $\lambda$s and $\tau$. What have we gained? Immediately, we can see the uncertainty in our estimates: the wider the distribution, the less certain our posterior belief should be. We can also see what the plausible values for the parameters are: $\lambda_1$ is around 18 and $\lambda_2$ is around 23. The posterior distributions of the two $\lambda$s are clearly distinct, indicating that it is indeed likely that there was a change in the user's text-message behaviour.
What other observations can you make? If you look at the original data again, do these results seem reasonable?
Notice also that the posterior distributions for the $\lambda$s do not look like exponential distributions, even though our priors for these variables were exponential. In fact, the posterior distributions are not really of any form that we recognize from the original model. But that's OK! This is one of the benefits of taking a computational point of view. If we had instead done this analysis using mathematical approaches, we would have been stuck with an analytically intractable (and messy) distribution. Our use of a computational approach makes us indifferent to mathematical tractability.
Our analysis also returned a distribution for $\tau$. Its posterior distribution looks a little different from the other two because it is a discrete random variable, so it doesn't assign probabilities to intervals. We can see that near day 45, there was a 50% chance that the user's behaviour changed. Had no change occurred, or had the change been gradual over time, the posterior distribution of $\tau$ would have been more spread out, reflecting that many days were plausible candidates for $\tau$. By contrast, in the actual results we see that only three or four days make any sense as potential transition points.
### Why would I want samples from the posterior, anyways?
We will deal with this question for the remainder of the book, and it is an understatement to say that it will lead us to some amazing results. For now, let's end this chapter with one more example.
We'll use the posterior samples to answer the following question: what is the expected number of texts at day $t, \; 0 \le t \le 70$ ? Recall that the expected value of a Poisson variable is equal to its parameter $\lambda$. Therefore, the question is equivalent to *what is the expected value of $\lambda$ at time $t$*?
In the code below, let $i$ index samples from the posterior distributions. Given a day $t$, we average over all possible $\lambda_i$ for that day $t$, using $\lambda_i = \lambda_{1,i}$ if $t \lt \tau_i$ (that is, if the behaviour change has not yet occurred), else we use $\lambda_i = \lambda_{2,i}$.
```python
figsize(12.5, 5)
# tau_samples, lambda_1_samples, lambda_2_samples contain
# N samples from the corresponding posterior distribution
N = tau_samples.shape[0]
expected_texts_per_day = np.zeros(n_count_data)
for day in range(0, n_count_data):
# ix is a bool index of all tau samples corresponding to
# the switchpoint occurring prior to value of 'day'
ix = day < tau_samples
# Each posterior sample corresponds to a value for tau.
# for each day, that value of tau indicates whether we're "before"
# (in the lambda1 "regime") or
# "after" (in the lambda2 "regime") the switchpoint.
# by taking the posterior sample of lambda1/2 accordingly, we can average
# over all samples to get an expected value for lambda on that day.
# As explained, the "message count" random variable is Poisson distributed,
# and therefore lambda (the poisson parameter) is the expected value of
# "message count".
expected_texts_per_day[day] = (lambda_1_samples[ix].sum()
+ lambda_2_samples[~ix].sum()) / N
plt.plot(range(n_count_data), expected_texts_per_day, lw=4, color="#E24A33",
label="expected number of text-messages received")
plt.xlim(0, n_count_data)
plt.xlabel("Day")
plt.ylabel("Expected # text-messages")
plt.title("Expected number of text-messages received")
plt.ylim(0, 60)
plt.bar(np.arange(len(count_data)), count_data, color="#348ABD", alpha=0.65,
label="observed texts per day")
plt.legend(loc="upper left");
```
Our analysis shows strong support for believing the user's behavior did change ($\lambda_1$ would have been close in value to $\lambda_2$ had this not been true), and that the change was sudden rather than gradual (as demonstrated by $\tau$'s strongly peaked posterior distribution). We can speculate what might have caused this: a cheaper text-message rate, a recent weather-to-text subscription, or perhaps a new relationship. (In fact, the 45th day corresponds to Christmas, and I moved away to Toronto the next month, leaving a girlfriend behind.)
##### Exercises
1\. Using `lambda_1_samples` and `lambda_2_samples`, what is the mean of the posterior distributions of $\lambda_1$ and $\lambda_2$?
```python
#type your code here.
print(lambda_1_samples.mean())
print(lambda_2_samples.mean())
```
17.759335513669996
22.690660793052064
2\. What is the expected percentage increase in text-message rates? `hint:` compute the mean of `lambda_1_samples/lambda_2_samples`. Note that this quantity is very different from `lambda_1_samples.mean()/lambda_2_samples.mean()`.
```python
#type your code here.
print( (lambda_1_samples / lambda_2_samples).mean() )
print(lambda_1_samples.mean() / lambda_2_samples.mean() )
```
0.7838908068925983
0.7826715879119724
3\. What is the mean of $\lambda_1$ **given** that we know $\tau$ is less than 45. That is, suppose we have been given new information that the change in behaviour occurred prior to day 45. What is the expected value of $\lambda_1$ now? (You do not need to redo the PyMC3 part. Just consider all instances where `tau_samples < 45`.)
```python
#type your code here.
lambda_1_samples[tau_samples < 45].mean()
```
17.750733104781183
```python
lambda_1_samples.mean()
```
17.759335513669996
### References
- [1] Gelman, Andrew. N.p.. Web. 22 Jan 2013. [N is never large enough](http://andrewgelman.com/2005/07/31/n_is_never_larg).
- [2] Norvig, Peter. 2009. [The Unreasonable Effectiveness of Data](http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/35179.pdf).
- [3] Salvatier, J, Wiecki TV, and Fonnesbeck C. (2016) Probabilistic programming in Python using PyMC3. *PeerJ Computer Science* 2:e55 <https://doi.org/10.7717/peerj-cs.55>
- [4] Jimmy Lin and Alek Kolcz. Large-Scale Machine Learning at Twitter. Proceedings of the 2012 ACM SIGMOD International Conference on Management of Data (SIGMOD 2012), pages 793-804, May 2012, Scottsdale, Arizona.
- [5] Cronin, Beau. "Why Probabilistic Programming Matters." 24 Mar 2013. Google, Online Posting to Google . Web. 24 Mar. 2013. <https://plus.google.com/u/0/107971134877020469960/posts/KpeRdJKR6Z1>.
```python
from IPython.core.display import HTML
def css_styling():
styles = open("../styles/custom.css", "r").read()
return HTML(styles)
css_styling()
```
<style>
@font-face {
font-family: "Computer Modern";
src: url('http://9dbb143991406a7c655e-aa5fcb0a5a4ec34cff238a2d56ca4144.r56.cf5.rackcdn.com/cmunss.otf');
}
@font-face {
font-family: "Computer Modern";
font-weight: bold;
src: url('http://9dbb143991406a7c655e-aa5fcb0a5a4ec34cff238a2d56ca4144.r56.cf5.rackcdn.com/cmunsx.otf');
}
@font-face {
font-family: "Computer Modern";
font-style: oblique;
src: url('http://9dbb143991406a7c655e-aa5fcb0a5a4ec34cff238a2d56ca4144.r56.cf5.rackcdn.com/cmunsi.otf');
}
@font-face {
font-family: "Computer Modern";
font-weight: bold;
font-style: oblique;
src: url('http://9dbb143991406a7c655e-aa5fcb0a5a4ec34cff238a2d56ca4144.r56.cf5.rackcdn.com/cmunso.otf');
}
div.cell{
width:800px;
margin-left:16% !important;
margin-right:auto;
}
h1 {
font-family: Helvetica, serif;
}
h4{
margin-top:12px;
margin-bottom: 3px;
}
div.text_cell_render{
font-family: Computer Modern, "Helvetica Neue", Arial, Helvetica, Geneva, sans-serif;
line-height: 145%;
font-size: 130%;
width:800px;
margin-left:auto;
margin-right:auto;
}
.CodeMirror{
font-family: "Source Code Pro", source-code-pro,Consolas, monospace;
}
.prompt{
display: None;
}
.text_cell_render h5 {
font-weight: 300;
font-size: 22pt;
color: #4057A1;
font-style: italic;
margin-bottom: .5em;
margin-top: 0.5em;
display: block;
}
.warning{
color: rgb( 240, 20, 20 )
}
</style>
```python
```
|
{"hexsha": "cb82f53deed0467a015d6609b77ff2818b7325f7", "size": 359770, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Chapter1_Introduction/Ch1_Introduction_PyMC3.ipynb", "max_stars_repo_name": "jeremymiller00/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers", "max_stars_repo_head_hexsha": "2024638d5936e85c4b40975abc2412d46bb9ac44", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Chapter1_Introduction/Ch1_Introduction_PyMC3.ipynb", "max_issues_repo_name": "jeremymiller00/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers", "max_issues_repo_head_hexsha": "2024638d5936e85c4b40975abc2412d46bb9ac44", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter1_Introduction/Ch1_Introduction_PyMC3.ipynb", "max_forks_repo_name": "jeremymiller00/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers", "max_forks_repo_head_hexsha": "2024638d5936e85c4b40975abc2412d46bb9ac44", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 305.9268707483, "max_line_length": 91228, "alphanum_fraction": 0.9084081497, "converted": true, "num_tokens": 11769}
|
"""
lvmspec.sky
============
Utility functions to compute a sky model and subtract it.
"""
import numpy as np
from lvmspec.resolution import Resolution
from lvmspec.linalg import cholesky_solve
from lvmspec.linalg import cholesky_solve_and_invert
from lvmspec.linalg import spline_fit
from lvmutil.log import get_logger
from lvmspec import util
from lvmutil import stats as dustat
import scipy,scipy.sparse,scipy.stats,scipy.ndimage
import sys
def compute_sky(frame, nsig_clipping=4.,max_iterations=100,model_ivar=False,add_variance=True) :
    """Compute a sky model.

    Input has to correspond to sky fibers only.
    Input flux are expected to be flatfielded!
    We don't check this in this routine.

    Args:
        frame : Frame object, which includes attributes
          - wave : 1D wavelength grid in Angstroms
          - flux : 2D flux[nspec, nwave] density
          - ivar : 2D inverse variance of flux
          - mask : 2D inverse mask flux (0=good)
          - resolution_data : 3D[nspec, ndiag, nwave] (only sky fibers)
        nsig_clipping : [optional] sigma clipping value for outlier rejection

    Optional:
        max_iterations : int , number of iterations
        model_ivar : replace ivar by a model to avoid bias due to correlated flux and ivar. this has a negligible effect on sims.
        add_variance : if True (and more than one sky fiber is present),
            inflate the sky model variance around sky-line peaks until the
            reduced chi2 there drops to <= 1 (see the per-peak loop below).

    returns SkyModel object with attributes wave, flux, ivar, mask
    """
    log=get_logger()
    log.info("starting")

    # Grab sky fibers on this frame
    skyfibers = np.where(frame.fibermap['OBJTYPE'] == 'SKY')[0]
    assert np.max(skyfibers) < 500  #- indices, not fiber numbers

    nwave=frame.nwave
    nfibers=len(skyfibers)

    # Masked pixels (mask != 0) get ivar = 0 so they carry no weight in the fit.
    current_ivar=frame.ivar[skyfibers].copy()*(frame.mask[skyfibers]==0)
    flux = frame.flux[skyfibers]
    Rsky = frame.R[skyfibers]

    input_ivar=None
    if model_ivar :
        log.info("use a model of the inverse variance to remove bias due to correlated ivar and flux")
        input_ivar=current_ivar.copy()
        median_ivar_vs_wave = np.median(current_ivar,axis=0)
        median_ivar_vs_fiber = np.median(current_ivar,axis=1)
        median_median_ivar = np.median(median_ivar_vs_fiber)
        for f in range(current_ivar.shape[0]) :
            threshold=0.01
            # Separable ivar model: per-fiber scale times a common wavelength profile.
            current_ivar[f] = median_ivar_vs_fiber[f]/median_median_ivar * median_ivar_vs_wave
            # keep input ivar for very low weights
            ii=(input_ivar[f]<=(threshold*median_ivar_vs_wave))
            #log.info("fiber {} keep {}/{} original ivars".format(f,np.sum(ii),current_ivar.shape[1]))
            current_ivar[f][ii] = input_ivar[f][ii]

    # Weighted least-squares ingredients for the normal equations below.
    sqrtw=np.sqrt(current_ivar)
    sqrtwflux=sqrtw*flux

    chi2=np.zeros(flux.shape)

    #debug
    #nfibers=min(nfibers,2)

    nout_tot=0
    for iteration in range(max_iterations) :

        # Normal equations A.skyflux = B, accumulated fiber by fiber.
        A=scipy.sparse.lil_matrix((nwave,nwave)).tocsr()
        B=np.zeros((nwave))
        # diagonal sparse matrix with content = sqrt(ivar)*flat of a given fiber
        SD=scipy.sparse.lil_matrix((nwave,nwave))
        # loop on fiber to handle resolution
        for fiber in range(nfibers) :
            if fiber%10==0 :
                log.info("iter %d fiber %d"%(iteration,fiber))
            R = Rsky[fiber]

            # diagonal sparse matrix with content = sqrt(ivar)
            SD.setdiag(sqrtw[fiber])

            sqrtwR = SD*R # each row r of R is multiplied by sqrtw[r]

            A = A+(sqrtwR.T*sqrtwR).tocsr()
            B += sqrtwR.T*sqrtwflux[fiber]

        log.info("iter %d solving"%iteration)

        # Restrict the solve to wavelengths that received any weight.
        w = A.diagonal()>0
        A_pos_def = A.todense()[w,:]
        A_pos_def = A_pos_def[:,w]
        skyflux = B*0
        try:
            skyflux[w]=cholesky_solve(A_pos_def,B[w])
        except:
            # NOTE(review): bare except — presumably intended to catch only the
            # linear-algebra failure; confirm before narrowing.
            log.info("cholesky failed, trying svd in iteration {}".format(iteration))
            skyflux[w]=np.linalg.lstsq(A_pos_def,B[w])[0]

        log.info("iter %d compute chi2"%iteration)

        for fiber in range(nfibers) :
            # Per-pixel chi2 of the data against the resolution-convolved sky.
            S = Rsky[fiber].dot(skyflux)
            chi2[fiber]=current_ivar[fiber]*(flux[fiber]-S)**2

        log.info("rejecting")

        nout_iter=0
        if iteration<1 :
            # only remove worst outlier per wave
            # apply rejection iteratively, only one entry per wave among fibers
            # find waves with outlier (fastest way)
            nout_per_wave=np.sum(chi2>nsig_clipping**2,axis=0)
            selection=np.where(nout_per_wave>0)[0]
            for i in selection :
                worst_entry=np.argmax(chi2[:,i])
                current_ivar[worst_entry,i]=0
                sqrtw[worst_entry,i]=0
                sqrtwflux[worst_entry,i]=0
                nout_iter += 1
        else :
            # remove all of them at once
            bad=(chi2>nsig_clipping**2)
            current_ivar *= (bad==0)
            sqrtw *= (bad==0)
            sqrtwflux *= (bad==0)
            nout_iter += np.sum(bad)

        nout_tot += nout_iter

        sum_chi2=float(np.sum(chi2))
        ndf=int(np.sum(chi2>0)-nwave)
        chi2pdf=0.
        if ndf>0 :
            chi2pdf=sum_chi2/ndf
        log.info("iter #%d chi2=%f ndf=%d chi2pdf=%f nout=%d"%(iteration,sum_chi2,ndf,chi2pdf,nout_iter))

        if nout_iter == 0 :
            # Converged: no new outliers were rejected this iteration.
            break

    log.info("nout tot=%d"%nout_tot)

    # no need restore original ivar to compute model error when modeling ivar
    # the sky inverse variances are very similar

    # solve once again to get deconvolved sky variance
    try :
        unused_skyflux,skycovar=cholesky_solve_and_invert(A.todense(),B)
    except np.linalg.linalg.LinAlgError :
        log.warning("cholesky_solve_and_invert failed, switching to np.linalg.lstsq and np.linalg.pinv")
        #skyflux = np.linalg.lstsq(A.todense(),B)[0]
        skycovar = np.linalg.pinv(A.todense())

    #- sky inverse variance, but incomplete and not needed anyway
    # skyvar=np.diagonal(skycovar)
    # skyivar=(skyvar>0)/(skyvar+(skyvar==0))

    # Use diagonal of skycovar convolved with mean resolution of all fibers
    # first compute average resolution
    mean_res_data=np.mean(frame.resolution_data,axis=0)
    R = Resolution(mean_res_data)

    # compute convolved sky and ivar
    cskycovar=R.dot(skycovar).dot(R.T.todense())
    cskyvar=np.diagonal(cskycovar)
    cskyivar=(cskyvar>0)/(cskyvar+(cskyvar==0))

    # convert cskyivar to 2D; today it is the same for all spectra,
    # but that may not be the case in the future
    cskyivar = np.tile(cskyivar, frame.nspec).reshape(frame.nspec, nwave)

    # Convolved sky
    cskyflux = np.zeros(frame.flux.shape)
    for i in range(frame.nspec):
        cskyflux[i] = frame.R[i].dot(skyflux)

    # look at chi2 per wavelength and increase sky variance to reach chi2/ndf=1
    if skyfibers.size > 1 and add_variance :
        log.info("Add a model error due to wavelength solution noise")
        # Combined (data + sky model) inverse variance on the sky fibers.
        tivar = util.combine_ivar(frame.ivar[skyfibers], cskyivar[skyfibers])

        # the chi2 at a given wavelength can be large because on a cosmic
        # and not a psf error or sky non uniformity
        # so we need to consider only waves for which
        # a reasonable sky model error can be computed

        # mean sky
        msky = np.mean(cskyflux,axis=0)
        dwave = np.mean(np.gradient(frame.wave))
        dskydw = np.zeros(msky.shape)
        # Central-difference derivative of the mean sky w.r.t. wavelength.
        dskydw[1:-1]=(msky[2:]-msky[:-2])/(frame.wave[2:]-frame.wave[:-2])
        dskydw = np.abs(dskydw)

        # now we consider a worst possible sky model error (20% error on flat, 0.5A )
        max_possible_var = 1./(tivar+(tivar==0)) + (0.2*msky)**2 + (0.5*dskydw)**2

        # exclude residuals inconsistent with this max possible variance (at 3 sigma)
        bad = (frame.flux[skyfibers]-cskyflux[skyfibers])**2 > 3**2*max_possible_var
        tivar[bad]=0
        ndata = np.sum(tivar>0,axis=0)

        ok=np.where(ndata>1)[0]
        # NOTE(review): debug print left in — consider log.debug instead.
        print("ok.size=",ok.size)

        # Reduced chi2 per wavelength over the surviving sky fibers.
        chi2 = np.zeros(frame.wave.size)
        chi2[ok] = np.sum(tivar*(frame.flux[skyfibers]-cskyflux[skyfibers])**2,axis=0)[ok]/(ndata[ok]-1)
        chi2[ndata<=1] = 1. # default

        # now we are going to evaluate a sky model error based on this chi2,
        # but only around sky flux peaks (>0.1*max)
        tmp = np.zeros(frame.wave.size)
        # Local maxima of the mean sky above 10% of its global maximum.
        tmp = (msky[1:-1]>msky[2:])*(msky[1:-1]>msky[:-2])*(msky[1:-1]>0.1*np.max(msky))
        peaks = np.where(tmp)[0]+1
        dpix = int(np.ceil(3/dwave)) # +- n Angstrom around each peak

        skyvar = 1./(cskyivar+(cskyivar==0))

        # loop on peaks
        for peak in peaks :
            b=peak-dpix
            e=peak+dpix+1
            mchi2 = np.mean(chi2[b:e]) # mean reduced chi2 around peak
            mndata = np.mean(ndata[b:e]) # mean number of fibers contributing

            # sky model variance = sigma_flat * msky + sigma_wave * dmskydw
            sigma_flat=0.000 # the fiber flat error is already included in the flux ivar
            sigma_wave=0.005 # A, minimum value
            res2=(frame.flux[skyfibers,b:e]-cskyflux[skyfibers,b:e])**2
            var=1./(tivar[:,b:e]+(tivar[:,b:e]==0))
            nd=np.sum(tivar[:,b:e]>0)
            # Grow sigma_wave (in 0.005 A steps, capped at 2 A) until the
            # reduced chi2 around this peak falls to <= 1.
            while(sigma_wave<2) :
                pivar=1./(var+(sigma_flat*msky[b:e])**2+(sigma_wave*dskydw[b:e])**2)
                pchi2=np.sum(pivar*res2)/nd
                if pchi2<=1 :
                    log.info("peak at {}A : sigma_wave={}".format(int(frame.wave[peak]),sigma_wave))
                    skyvar[:,b:e] += ( (sigma_flat*msky[b:e])**2 + (sigma_wave*dskydw[b:e])**2 )
                    break
                sigma_wave += 0.005

        modified_cskyivar = (cskyivar>0)/skyvar
    else :
        modified_cskyivar = cskyivar.copy()

    # need to do better here
    mask = (cskyivar==0).astype(np.uint32)

    return SkyModel(frame.wave.copy(), cskyflux, modified_cskyivar, mask,
                    nrej=nout_tot, stat_ivar = cskyivar) # keep a record of the statistical ivar for QA
class SkyModel(object):
    def __init__(self, wave, flux, ivar, mask, header=None, nrej=0, stat_ivar=None):
        """Container for a 2D sky model and its per-spectrum uncertainties.

        Args:
            wave : 1D[nwave] wavelength in Angstroms
            flux : 2D[nspec, nwave] sky model to subtract
            ivar : 2D[nspec, nwave] inverse variance of the sky model
            mask : 2D[nspec, nwave] 0=ok or >0 if problems; 32-bit
            header : (optional) header from FITS file HDU0
            nrej : (optional) number of pixels rejected while fitting the model
            stat_ivar : (optional) purely statistical inverse variance,
                kept for QA alongside the possibly-inflated ``ivar``

        All input arguments become attributes.
        """
        # Geometry sanity checks before anything is stored.
        assert wave.ndim == 1
        assert flux.ndim == 2
        assert ivar.shape == flux.shape
        assert mask.shape == flux.shape

        nspec, nwave = flux.shape
        self.nspec = nspec
        self.nwave = nwave
        self.wave = wave
        self.flux = flux
        self.ivar = ivar
        # Enforce the 32-bit mask convention used throughout the package.
        self.mask = util.mask32(mask)
        self.header = header
        self.nrej = nrej
        self.stat_ivar = stat_ivar
def subtract_sky(frame, skymodel) :
    """Subtract skymodel from frame in place.

    Alters frame.flux, frame.ivar and frame.mask.

    Args:
        frame : lvmspec.Frame object
        skymodel : lvmspec.SkyModel object
    """
    assert frame.nspec == skymodel.nspec
    assert frame.nwave == skymodel.nwave

    log = get_logger()
    log.info("starting")

    # Refuse to subtract when the two wavelength grids disagree.
    grids_match = np.allclose(frame.wave, skymodel.wave)
    if not grids_match:
        msg = "frame and sky not on same wavelength grid"
        log.error(msg)
        raise ValueError(msg)

    # In-place update: flux residual, combined uncertainty, merged mask bits.
    frame.flux -= skymodel.flux
    frame.ivar = util.combine_ivar(frame.ivar, skymodel.ivar)
    frame.mask |= skymodel.mask

    log.info("done")
def qa_skysub(param, frame, skymodel, quick_look=False):
    """Calculate QA on SkySubtraction

    Note: Pixels rejected in generating the SkyModel (as above), are
    not rejected in the stats calculated here. Would need to carry
    along current_ivar to do so.

    Args:
        param : dict of QA parameters : see qa_frame.init_skysub for example
        frame : lvmspec.Frame object; Should have been flat fielded
        skymodel : lvmspec.SkyModel object
        quick_look : bool, optional
            If True, do QuickLook specific QA (or avoid some)

    Returns:
        qadict: dict of QA outputs
            Need to record simple Python objects for yaml (str, float, int)
    """
    from lvmspec.qa import qalib
    import copy

    #- Work on a deep copy so the input frame keeps flowing unchanged through
    #- the rest of the pipeline (sky subtraction here is only for QA; the
    #- pipeline performs it separately, in the fluxcalib stage).
    skysub_frame = copy.deepcopy(frame)
    subtract_sky(skysub_frame, skymodel)

    # Sky residuals first
    qadict = qalib.sky_resid(param, skysub_frame, skymodel, quick_look=quick_look)

    if quick_look:
        # The following can be a *large* dict
        qadict.update(qalib.SignalVsNoise(skysub_frame, param))
    else:
        # Sky continuum is measured after flat fielding in QuickLook,
        # so only compute it on the offline path.
        channel = frame.meta['CAMERA'][0]
        wrange1, wrange2 = param[channel.upper() + '_CONT']
        skyfiber, contfiberlow, contfiberhigh, meancontfiber, skycont = qalib.sky_continuum(
            frame, wrange1, wrange2)
        qadict["SKYFIBERID"] = skyfiber.tolist()
        qadict["SKYCONT"] = skycont
        qadict["SKYCONT_FIBER"] = meancontfiber

    return qadict
|
{"hexsha": "b5ddfc7a58617b51521fba2b92a9687c3eebfffb", "size": 13479, "ext": "py", "lang": "Python", "max_stars_repo_path": "py/lvmspec/sky.py", "max_stars_repo_name": "sdss/lvmspec", "max_stars_repo_head_hexsha": "befd6991537c4947fdf63ca262937f2bb845148f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "py/lvmspec/sky.py", "max_issues_repo_name": "sdss/lvmspec", "max_issues_repo_head_hexsha": "befd6991537c4947fdf63ca262937f2bb845148f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "py/lvmspec/sky.py", "max_forks_repo_name": "sdss/lvmspec", "max_forks_repo_head_hexsha": "befd6991537c4947fdf63ca262937f2bb845148f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2348066298, "max_line_length": 134, "alphanum_fraction": 0.6280139476, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3753}
|
import networkx as nx
def get_neighbors(G, v):
    """Return ``[(neighbor, weight), ...]`` for vertex ``v`` of graph ``G``.

    Edges without an explicit ``'weight'`` attribute default to weight 1.

    Args:
        G: graph exposing ``neighbors(v)`` and edge-attribute access
           ``G[v][n]`` (e.g. a ``networkx`` graph).
        v: vertex whose neighborhood is requested.

    Returns:
        List of ``(neighbor, weight)`` tuples in ``G.neighbors(v)`` order.
    """
    # dict.get replaces the original broad `except Exception` handler, which
    # also printed a leftover debug message ('Hier') on every missing weight.
    return [(n, G[v][n].get('weight', 1)) for n in G.neighbors(v)]
def dijkstra_original(G, source, target, excluded_vertices=None):
    """Dijkstra-based routing that avoids a set of excluded vertices.

    Shortest distances are computed *from the target*; the path is then
    built greedily from the source by always stepping to the neighbor with
    the smallest (distance-to-target + edge weight), skipping excluded
    vertices.

    Args:
        G: graph exposing ``nodes``, ``neighbors(v)`` and ``G[u][v]`` access.
        source: start vertex (must not be excluded).
        target: end vertex (must not be excluded).
        excluded_vertices: optional collection of vertices to avoid.
            ``None`` means no exclusions (the original code crashed with a
            TypeError on ``in None`` when this default was used).

    Returns:
        The constructed path as a list of vertices. If the greedy walk
        revisits a vertex (a loop), construction stops and the partial
        path is returned.

    Raises:
        nx.NetworkXNoPath: if source or target is in ``excluded_vertices``.
    """
    # Normalize the default so the membership tests below are always valid.
    if excluded_vertices is None:
        excluded_vertices = set()

    # Raise if source or target are excluded. (The original wrapped these
    # checks in a pointless try/except-re-raise and then repeated them.)
    if source in excluded_vertices:
        raise nx.NetworkXNoPath(f"Source {source} can not be excluded!")
    if target in excluded_vertices:
        raise nx.NetworkXNoPath(f"Target {target} can not be excluded!")

    # Init distances with infinite
    distances = {v: float('inf') for v in G.nodes}

    # Swap source and target so the distances are calculated from the target
    # and not the source. This is important for the path building step.
    swap = [source, target]
    source = swap[1]
    distances[source] = 0
    vertices = list(G.nodes)

    # Calculate the distance of each node to the target
    while len(vertices) > 0:
        v = min(vertices, key=lambda u: distances[u])
        vertices.remove(v)
        if distances[v] == float('inf'):
            # Every remaining vertex is unreachable from the target.
            break
        for neighbor, weight in get_neighbors(G, v):
            path_cost = distances[v] + weight
            if path_cost < distances[neighbor]:
                distances[neighbor] = path_cost

    # Swap source and target back
    source = swap[0]

    path = [source]
    current_vertex = source
    while current_vertex != target:
        # Get the neighbors of the current vertex
        neighbors = get_neighbors(G, current_vertex)
        # Sort them based on their costs to the current vertex
        neighbors.sort(reverse=True, key=lambda tup: tup[1])
        costs_to_src = [float('inf')]
        for neighbor in neighbors:
            if neighbor[0] != source \
                    and neighbor[0] not in excluded_vertices \
                    and distances[neighbor[0]] != float('inf') \
                    and (distances[neighbor[0]] + neighbor[1]) < (min(costs_to_src)):
                # Pick the neighbor with (current) minimal cost to target:
                # total distance to the target + edge cost from here.
                current_vertex = neighbor[0]
                costs_to_src.append(distances[neighbor[0]] + neighbor[1])
        path.append(current_vertex)
        # If the same vertex occurs twice the walk is looping; terminate.
        if len(path) != len(set(path)):
            break
    return path
|
{"hexsha": "1360ba258d67ad26628450818ad726336b2301b1", "size": 2882, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/dijkstra_original.py", "max_stars_repo_name": "philippzabka/resilionator", "max_stars_repo_head_hexsha": "a51d38eec73bdb3fed3646a0234dc39308309273", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/dijkstra_original.py", "max_issues_repo_name": "philippzabka/resilionator", "max_issues_repo_head_hexsha": "a51d38eec73bdb3fed3646a0234dc39308309273", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/dijkstra_original.py", "max_forks_repo_name": "philippzabka/resilionator", "max_forks_repo_head_hexsha": "a51d38eec73bdb3fed3646a0234dc39308309273", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-23T14:10:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-23T14:10:36.000Z", "avg_line_length": 35.5802469136, "max_line_length": 119, "alphanum_fraction": 0.6127689105, "include": true, "reason": "import networkx", "num_tokens": 631}
|
import argparse
import logging
import math
import os
from collections import Counter
from typing import Iterable, Iterator, NamedTuple, Tuple
import cv2
import librosa
import numpy as np
import optuna
import pandas as pd
import torch
import torch.nn as nn
from efficientnet_pytorch import EfficientNet
from sklearn import preprocessing
from torch.utils.data import DataLoader, Dataset
from utils.common import load_dz_data
from utils.metrics import Metrics
from utils.test_split import TestSplitter
from utils.cross_validation import CrossValidator
from .config import (
AudioParams,
best_params,
data_params,
spec_aug_combos,
wav_aug_combos,
)
from .dataset import ElephantDataset
from .engine import Engine
from .models import get_pretrained_model
from .utils import split_train_val_test, log_neptune, set_seeds, seed_worker
def run_train(
    data_params: dict,
    hyper_params: dict,
    wav_aug_combos: dict,
    spec_aug_combos: dict,
    save_model=False,
) -> Tuple[dict, dict]:
    """Train one model for a single seed and return its best metrics.

    Args:
        data_params: data/splitting configuration (reads SEED, STRATIFY_COL,
            BASE_DATA_DIR).
        hyper_params: training configuration (reads epochs, learning_rate and
            the augmentation-combo keys).
        wav_aug_combos: mapping from combo name to waveform augmentations.
        spec_aug_combos: mapping from combo name to spectrogram augmentations.
        save_model: if True, save the weights of the best model (lowest
            validation loss) to ``best_model_params_<seed>.pt``.

    Returns:
        (best_valid, best_test): the validation and test metric dicts captured
        at the epoch with the lowest validation loss (the original annotation
        said ``-> float``, which was wrong).
    """
    # For reproducibility
    g = torch.Generator()
    g.manual_seed(0)

    seed = data_params["SEED"]
    logging.info(f"SEED {seed} ---------------------")
    set_seeds(seed)

    target_col = data_params["STRATIFY_COL"]
    epochs = hyper_params["epochs"]

    # Load Data
    df = load_dz_data(data_params["BASE_DATA_DIR"])
    n_classes = len(set(df[target_col]))

    # Create wav paths
    df["wav_path"] = "./data/wavs/" + df["unique_ID"] + ".wav"

    lbl_enc = preprocessing.LabelEncoder()

    # Split train/val/test
    TestSplitter(data_params).get_no_leakage_trainval_test_splits()
    df_train, df_valid, df_test = split_train_val_test(df, data_params)
    logging.info(
        f"Train: {len(df_train)}, Valid: {len(df_valid)}, Test: {len(df_test)}"
    )

    # Encode target. Fit the encoder on the training labels only and reuse the
    # same mapping everywhere: re-fitting on each split (as the original did)
    # silently shifts the class->id mapping whenever a split misses a class.
    train_targets = lbl_enc.fit_transform(df_train[target_col])
    labels = list(sorted(set(df_train[target_col])))

    wav_augs = wav_aug_combos[hyper_params["wav_augs"]]
    spec_augs = spec_aug_combos[hyper_params["spec_augs"]]
    params = AudioParams()
    train_dataset = ElephantDataset(
        df_train.wav_path,
        train_targets,
        params,
        wav_augmentations=wav_augs,
        spec_augmentations=spec_augs,
    )
    train_dl = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=14,
        num_workers=4,
        pin_memory=True,
        worker_init_fn=seed_worker,
        generator=g,
    )

    # Evaluation splits use their own (typically lighter) augmentations.
    wav_augs_eval = wav_aug_combos[hyper_params["wav_augs_eval"]]
    spec_augs_eval = spec_aug_combos[hyper_params["spec_augs_eval"]]
    valid_targets = lbl_enc.transform(df_valid[target_col])
    valid_dataset = ElephantDataset(
        df_valid.wav_path,
        valid_targets,
        params,
        wav_augmentations=wav_augs_eval,
        spec_augmentations=spec_augs_eval,
    )
    valid_dl = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=14,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
        worker_init_fn=seed_worker,
        generator=g,
    )

    test_targets = lbl_enc.transform(df_test[target_col])
    test_dataset = ElephantDataset(
        df_test.wav_path,
        test_targets,
        params,
        wav_augmentations=wav_augs_eval,
        spec_augmentations=spec_augs_eval,
    )
    test_dl = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=14,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
        worker_init_fn=seed_worker,
        generator=g,
    )

    myModel = get_pretrained_model(hyper_params, num_classes=n_classes)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    myModel = myModel.to(device)

    # Create class weights to combat imbalance. Index the counts by class id:
    # Counter.values() is in first-occurrence order, which does not in general
    # match the 0..n_classes-1 ordering that CrossEntropyLoss's `weight`
    # tensor is indexed by.
    counts = Counter(train_targets)
    class_sample_count = [counts[c] for c in range(n_classes)]
    norm_weights = [1 - (x / sum(class_sample_count)) for x in class_sample_count]
    norm_weights = torch.FloatTensor(norm_weights).to(device)
    loss_fn = nn.CrossEntropyLoss(weight=norm_weights)

    learning_rate = hyper_params["learning_rate"]
    optimizer = torch.optim.Adam(myModel.parameters(), lr=learning_rate)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=learning_rate,
        steps_per_epoch=int(len(train_dl)),
        epochs=epochs,
        anneal_strategy="linear",
    )

    # Repeat for each epoch
    min_valid_loss = math.inf

    # Poor mans early stopping: abort training if validation loss does not
    # improve for x *successive* epochs, where x is 10% of total epochs
    # (at least 1, so short runs can still stop).
    early_stopping_iter = max(1, epochs // 10)
    early_stopping_counter = 0

    best_valid = None  # metrics at the best epoch so far
    best_test = None

    engine = Engine(myModel, optimizer, scheduler, loss_fn, device)
    for epoch in range(epochs):
        train_loss = engine.train_one_epoch(train_dl)

        valid_loss, val_targs, val_preds = engine.validate_one_epoch(valid_dl)
        valid_metrics = Metrics(val_targs, val_preds, labels=labels).get_metrics_dict(
            prefix="val"
        )

        test_loss, test_targs, test_preds = engine.validate_one_epoch(test_dl)
        test_metrics = Metrics(test_targs, test_preds, labels=labels).get_metrics_dict(
            prefix="test"
        )

        logging.debug(
            f"Seed: {seed} ,Training Loss: {train_loss}, Validation Loss: {valid_loss}"
        )

        if valid_loss < min_valid_loss:
            min_valid_loss = valid_loss
            best_valid = valid_metrics
            best_test = test_metrics
            # Reset patience on improvement: the original never reset the
            # counter, so stopping was triggered by *cumulative* rather than
            # successive non-improving epochs.
            early_stopping_counter = 0
            if save_model:
                logging.debug("Saving model.....")
                torch.save(myModel.state_dict(), f"best_model_params_{seed}.pt")
        else:
            early_stopping_counter += 1
            if early_stopping_counter > early_stopping_iter:
                logging.debug(f"Early stopping after {early_stopping_counter} iterations")
                break

    return best_valid, best_test
def train_one_model(
    data_params=data_params,
    best_params=best_params,
    wav_params=wav_aug_combos,
    spec_params=spec_aug_combos,
    save_model=True,
):
    """Train and evaluate the model across three fixed seeds (100, 200, 300).

    For each seed the split files are regenerated and `run_train` is invoked;
    the resulting validation/test metrics are written to the log.

    NOTE(review): the defaults are module-level dicts and this function mutates
    `data_params` in place (SEED keys) — callers share that state; confirm
    this is intended before refactoring.
    """
    logging.info("Experiment start ========")
    logging.info(f"Params:\n{best_params}")
    for seed in (100, 200, 300):
        # Propagate the current seed into the data pipeline configuration.
        data_params["SEED"] = seed
        data_params["TRAIN_TEST_SPLIT_SEED"] = seed
        # Create train/test split files without subject leakage for this seed.
        TestSplitter(data_params).get_no_leakage_trainval_test_splits()
        val_metrics, test_metrics = run_train(
            data_params, best_params, wav_params, spec_params, save_model=save_model
        )
        logging.info(
            f"Seed: {seed}, val_metrics:\n{val_metrics}\n, \nTest Metrics \n{test_metrics}"
        )
    logging.info("Experiment End ========")
if __name__ == "__main__":
    # Log everything for this run into a dedicated file.
    logging.basicConfig(filename="best_model.log", level=logging.INFO)
    # Experiment 1, best model
    train_one_model(save_model=True)
    # NOTE(review): the block below is disabled via a bare string literal; it
    # sweeps the wav/spec augmentation combinations for experiment 2.
    """
    # Experiment 2,
    for wav in wav_aug_combos:
        #if wav != best_params["wav_augs"]:
        params = best_params
        params["wav_augs"] = wav
        train_one_model(save_model=False, best_params=params)
    for spec in spec_aug_combos:
        #if spec != best_params["spec_augs"]:
        params = best_params
        params["spec_augs"] = spec
        train_one_model(save_model=False, best_params=params)
    """
|
{"hexsha": "fdac6c95c134e101ffe886ec723da7a7e341121c", "size": 7324, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models/train_model.py", "max_stars_repo_name": "karmatarap/capstone_project", "max_stars_repo_head_hexsha": "16f2b60be9634efec772c7dffb6efb90c37880ec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-30T20:19:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-22T03:19:55.000Z", "max_issues_repo_path": "src/models/train_model.py", "max_issues_repo_name": "karmatarap/capstone_project", "max_issues_repo_head_hexsha": "16f2b60be9634efec772c7dffb6efb90c37880ec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/models/train_model.py", "max_forks_repo_name": "karmatarap/capstone_project", "max_forks_repo_head_hexsha": "16f2b60be9634efec772c7dffb6efb90c37880ec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-08T00:35:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-08T00:35:35.000Z", "avg_line_length": 30.1399176955, "max_line_length": 91, "alphanum_fraction": 0.6774986346, "include": true, "reason": "import numpy", "num_tokens": 1713}
|
import numpy as np
from nibabel.affines import from_matvec
from nibabel.eulerangles import euler2mat
from ..patched import obliquity
def test_obliquity():
    """Check the calculation of inclination of an affine axes."""
    from math import pi

    # Axis-aligned affine (scaling + translation only): obliquity must be zero.
    aligned = np.diag([2.0, 2.0, 2.3, 1.0])
    aligned[:-1, -1] = [-10, -10, -7]

    # Tilt the axes slightly off the cardinal directions with a small rotation.
    rot = from_matvec(euler2mat(x=0.09, y=0.001, z=0.001), [0.0, 0.0, 0.0])
    tilted = rot.dot(aligned)

    np.testing.assert_almost_equal(obliquity(aligned), [0.0, 0.0, 0.0])
    np.testing.assert_almost_equal(
        obliquity(tilted) * 180 / pi, [0.0810285, 5.1569949, 5.1569376]
    )
|
{"hexsha": "f76293f7cf7a8e881c640a84378b82e308a196e4", "size": 639, "ext": "py", "lang": "Python", "max_stars_repo_path": "nitransforms/tests/test_affines.py", "max_stars_repo_name": "mgxd/nitransforms", "max_stars_repo_head_hexsha": "a922f3cb8ee1df5b484f617c34e1816a726e54e0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nitransforms/tests/test_affines.py", "max_issues_repo_name": "mgxd/nitransforms", "max_issues_repo_head_hexsha": "a922f3cb8ee1df5b484f617c34e1816a726e54e0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nitransforms/tests/test_affines.py", "max_forks_repo_name": "mgxd/nitransforms", "max_forks_repo_head_hexsha": "a922f3cb8ee1df5b484f617c34e1816a726e54e0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9375, "max_line_length": 73, "alphanum_fraction": 0.6400625978, "include": true, "reason": "import numpy", "num_tokens": 222}
|
(* Auto-generated lemmafinder harness: evaluates candidate boolean example
   terms and extracts them to OCaml for checking. *)
Load LFindLoad.
From lfind Require Import LFind.
Require Import Arith.
From adtind Require Import goal49.
Require Import Extraction.
(* Map Coq's nat and list constructors onto the target OCaml representation. *)
Extract Inductive nat => nat [ "(O)" "S" ].
Extract Inductive list => list [ "Nil" "Cons" ].
(* Concrete boolean results of the candidate lemma on sampled inputs. *)
Definition lfind_example_1 := ( false).
Definition lfind_example_2 := ( false).
Definition lfind_example_3 := ( false).
Definition lfind_example_4 := ( false).
Definition lfind_example_5 := ( false).
Definition lfind_example_6 := ( false).
Definition lfind_example_7 := ( true).
Definition lfind_example_8 := ( false).
Definition lfind_example_9 := ( false).
Definition lfind_example_10 := ( false).
Definition lfind_example_11 := ( true).
Definition lfind_example_12 := ( true).
Definition lfind_example_13 := ( true).
Definition lfind_example_14 := ( false).
Definition lfind_example_15 := ( false).
Definition lfind_example_16 := ( false).
(* Extract all example values into a single OCaml module for the ranker. *)
Extraction "/home/yousef/lemmafinder/benchmark/_lfind_clam_lf_goal49_theorem0_158_eqb_refl/lfind_extraction.ml" lfind_example_1 lfind_example_2 lfind_example_3 lfind_example_4 lfind_example_5 lfind_example_6 lfind_example_7 lfind_example_8 lfind_example_9 lfind_example_10 lfind_example_11 lfind_example_12 lfind_example_13 lfind_example_14 lfind_example_15 lfind_example_16 .
|
{"author": "yalhessi", "repo": "lemmaranker", "sha": "53bc2ad63ad7faba0d7fc9af4e1e34216173574a", "save_path": "github-repos/coq/yalhessi-lemmaranker", "path": "github-repos/coq/yalhessi-lemmaranker/lemmaranker-53bc2ad63ad7faba0d7fc9af4e1e34216173574a/benchmark/clam/_lfind_clam_lf_goal49_theorem0_158_eqb_refl/lfind_extraction.v"}
|
import numpy as np
from catboost import Pool, CatBoostClassifier
from catboost.utils import read_cd
from gbdt_uncertainty.data import process_classification_dataset
from gbdt_uncertainty.assessment import prr_class, ood_detect, nll_class
from gbdt_uncertainty.uncertainty import entropy_of_expected_class, expected_entropy_class, entropy
from sklearn.metrics import zero_one_loss, log_loss
from scipy.stats import ttest_rel
import math
import os
import joblib
import sys
from collections import defaultdict
# Benchmark datasets evaluated in the classification uncertainty experiments.
datasets = ["adult", "amazon", "click", "internet", "appetency", "churn", "upselling", "kick"]
# Gradient-boosting variants compared by default ("rf" is handled separately).
algorithms = ['sgb-fixed', 'sglb-fixed']
# for proper tables: map dataset identifiers to the display names used in LaTeX.
convert_name = {"adult": "Adult", "amazon": "Amazon", "click": "Click",
                "internet": "Internet", "appetency": "KDD-Appetency", "churn": "KDD-Churn",
                "upselling": "KDD-Upselling", "kick": "Kick"}
def sigmoid(z):
    """Logistic function 1 / (1 + e^-z), mapping raw scores into (0, 1)."""
    denom = 1 + np.exp(-z)
    return 1 / denom
def load_model(name, alg, i):
    """Load the i-th trained model for dataset `name` and algorithm `alg`.

    Random forests were persisted with joblib; boosting models use CatBoost's
    native binary format.
    """
    path = "results/models/" + name + "_" + alg + "_" + str(i)
    if alg == "rf":
        return joblib.load(path)
    model = CatBoostClassifier()
    model.load_model(path)
    return model
def rf_virtual_ensembles_predict(model, X, count=10):
    """Emulate a virtual ensemble for a fitted random forest.

    The forest's trees are split into `count` contiguous groups and each
    group's averaged class probabilities form one "virtual" ensemble member.

    Returns an array of shape (n_samples, count, n_classes).
    """
    trees = model.estimators_
    n = len(trees)
    group_means = []
    for g in range(count):
        lo, hi = int(g * n / count), int((g + 1) * n / count)
        # Average the per-tree probability predictions within this group.
        member_preds = np.array([trees[t].predict_proba(X) for t in range(lo, hi)])
        group_means.append(np.mean(member_preds, axis=0))
    # (count, n_samples, n_classes) -> (n_samples, count, n_classes)
    return np.swapaxes(np.array(group_means), 0, 1)
def virtual_ensembles_predict(X, model, alg, num_models=10):
    """Predict class probabilities with a `num_models`-member virtual ensemble.

    Returns an array of shape (num_models, n_samples, n_classes).
    """
    if alg == "rf":
        preds = rf_virtual_ensembles_predict(model, X, count=num_models)
    else:
        # CatBoost returns raw scores; convert to two-class probabilities.
        raw = model.virtual_ensembles_predict(
            X, prediction_type='VirtEnsembles', virtual_ensembles_count=num_models
        )
        pos = sigmoid(raw)
        preds = np.concatenate((1 - pos, pos), axis=2)
    return np.swapaxes(preds, 0, 1)
def compute_significance(values_all, metric, minimize=True):
    """Mean metric per algorithm plus indices statistically tied with the best.

    The best algorithm is chosen by (nan-aware) min or max of the per-fold
    means; a paired t-test with p > 0.05 against the best marks a tie.
    `metric` is unused but kept for interface compatibility.
    """
    means = np.mean(values_all, axis=1)
    best = np.nanargmin(means) if minimize else np.nanargmax(means)
    bold = {best}  # indices insignificantly different from the best
    for i in range(len(means)):
        stat, pval = ttest_rel(values_all[best], values_all[i])  # paired t-test
        if pval > 0.05:
            bold.add(i)
    return means, bold
def compute_best(values, minimize=True):
    """Return the set of indices whose value equals the (nan-aware) best entry."""
    best = np.nanargmin(values) if minimize else np.nanargmax(values)
    return {i for i, v in enumerate(values) if values[best] == v}
def make_table_entry(values_all, metric, minimize=True, round=2):
    """Format one LaTeX row fragment: per-algorithm means, best ties in bold.

    Each cell ends with "& "; the caller strips the trailing separator.
    NOTE: `round` shadows the builtin but is kept for interface compatibility.
    """
    means, bold = compute_significance(values_all, metric, minimize=minimize)
    cells = []
    for i in range(len(values_all)):
        shown = str(np.round(means[i], round))
        cells.append(("\\textbf{" + shown + "} ") if i in bold else (shown + " "))
    return "".join(c + "& " for c in cells)
def normalize_test_labels(y_test):
    """Map binary labels into {0, 1}: the smallest label becomes 0, the other 1."""
    smallest = min(y_test)
    return np.array([int(y != smallest) for y in y_test])
def aggregate_results(name, modes = ["single", "ens", "virt"],
                      algorithms = ['sgb-fixed', 'sglb-fixed'], num_models = 10):
    """Collect per-(mode, algorithm) metric dicts for dataset `name`.

    For each algorithm the in-domain test pool and the OOD pool are loaded,
    then error/NLL/PRR/AUC metrics are computed for a single model ("single"),
    a true ensemble ("ens"), or a virtual ensemble ("virt").
    Returns an object array of dicts, in mode-major order.

    NOTE(review): the default `modes`/`algorithms` arguments are shared
    mutable lists; they are not mutated here, but handle with care.
    """
    results = [] # metric values for all algorithms and all folds
    for mode in modes:
        for alg in algorithms:
            if alg == "rf":
                # Random forest path: data must be one-hot encoded first.
                train_pool, y_train, test_pool, y_test, enc = process_classification_dataset(name)
                # process ood data
                cd = read_cd("datasets/"+name+"/pool.cd", data_file = "datasets/"+name+"/test")
                try:
                    label_ind = cd['column_type_to_indices']['Label']
                except:
                    # Some pool descriptions name the label column "Target".
                    label_ind = cd['column_type_to_indices']['Target']
                ood_test_pool = np.loadtxt("datasets/ood/" + name, delimiter="\t", dtype="object")
                ood_test_pool = enc.transform(ood_test_pool).astype("float64")
                # Drop the label column so only features remain.
                ood_test_pool = np.delete(ood_test_pool, label_ind, 1)
                ood_size = len(ood_test_pool)
            else:
                # CatBoost path: pools are read directly with column descriptions.
                test_pool = Pool(data="datasets/"+name+"/test", column_description="datasets/"+name+"/pool.cd")
                ood_test_pool = Pool(data="datasets/ood/" + name, column_description="datasets/"+name+"/pool.cd")
                ood_size = ood_test_pool.num_row()
                y_test = test_pool.get_label()
            test_size = len(y_test)
            # 0 = in-domain sample, 1 = out-of-domain sample (for OOD AUC-ROC).
            domain_labels = np.concatenate([np.zeros(test_size), np.ones(ood_size)])
            y_test_norm = normalize_test_labels(y_test)
            values = defaultdict() # metric values for all folds for given algorithm
            if mode == "single":
                # use 0th model from ensemble as a single model
                model = load_model(name, alg, 0)
                preds = model.predict(test_pool)
                preds_proba = model.predict_proba(test_pool)
                values["error"] = (preds != y_test).astype(int)
                values["nll"] = nll_class(y_test_norm, preds_proba)
                values["TU_prr"] = prr_class(y_test_norm, preds_proba, entropy(preds_proba), False)
                # Knowledge uncertainty is undefined for a single model.
                values["KU_prr"] = float("nan")
                values["KU_auc"] = float("nan")
                ood_preds_proba = model.predict_proba(ood_test_pool)
                in_measure = entropy(preds_proba)
                out_measure = entropy(ood_preds_proba)
                values["TU_auc"] = ood_detect(domain_labels, in_measure, out_measure, mode="ROC")
            if mode == "ens":
                all_preds = [] # predictions of all models in ensemble
                all_preds_ood = []
                for i in range(num_models):
                    model = load_model(name, alg, i)
                    preds = model.predict_proba(test_pool)
                    all_preds.append(preds)
                    preds = model.predict_proba(ood_test_pool)
                    all_preds_ood.append(preds)
                all_preds = np.array(all_preds)
                preds_proba = np.mean(all_preds, axis=0)
                all_preds_ood = np.array(all_preds_ood)
                preds = np.argmax(preds_proba, axis=1)
                values["error"] = (preds != y_test_norm).astype(int)
                values["nll"] = nll_class(y_test_norm, preds_proba)
                # Knowledge uncertainty = total minus expected (data) entropy.
                TU = entropy_of_expected_class(all_preds)
                DU = expected_entropy_class(all_preds)
                KU = TU - DU
                TU_ood = entropy_of_expected_class(all_preds_ood)
                DU_ood = expected_entropy_class(all_preds_ood)
                KU_ood = TU_ood - DU_ood
                values["TU_prr"] = prr_class(y_test_norm, preds_proba, TU, False)
                values["KU_prr"] = prr_class(y_test_norm, preds_proba, KU, False)
                values["TU_auc"] = ood_detect(domain_labels, TU, TU_ood, mode="ROC")
                values["KU_auc"] = ood_detect(domain_labels, KU, KU_ood, mode="ROC")
            if mode == "virt":
                if alg in ["sgb", "sgb-fixed"]: # we do not evaluate virtual sgb model
                    continue
                # generate virtual ensemble from 0th model
                model = load_model(name, alg, 0)
                all_preds = virtual_ensembles_predict(test_pool, model, alg)
                preds_proba = np.mean(all_preds, axis=0)
                preds = np.argmax(preds_proba, axis=1)
                values["error"] = (preds != y_test_norm).astype(int)
                values["nll"] = nll_class(y_test_norm, preds_proba)
                TU = entropy_of_expected_class(all_preds)
                DU = expected_entropy_class(all_preds)
                KU = TU - DU
                all_preds_ood = virtual_ensembles_predict(ood_test_pool, model, alg)
                TU_ood = entropy_of_expected_class(all_preds_ood)
                DU_ood = expected_entropy_class(all_preds_ood)
                KU_ood = TU_ood - DU_ood
                values["TU_prr"] = prr_class(y_test_norm, preds_proba, TU, False)
                values["KU_prr"] = prr_class(y_test_norm, preds_proba, KU, False)
                values["TU_auc"] = ood_detect(domain_labels, TU, TU_ood, mode="ROC")
                values["KU_auc"] = ood_detect(domain_labels, KU, KU_ood, mode="ROC")
            # NOTE(review): this guard looks redundant — the `continue` inside
            # the "virt" branch above already skips the same combination.
            if mode == "virt" and alg in ["sgb", "sgb-fixed"]: # we do not evaluate virtual sgb model
                continue
            results.append(values)
    return np.array(results)
def make_table_element(mean, textbf, idx):
    """One LaTeX cell for entry `idx`: rounded integer, bold when tied-best.

    NaN means the metric is undefined for this entry and renders as "---".
    Every cell ends with "& "; the caller strips the final separator.
    """
    if np.isnan(mean[idx]):
        return "--- & "
    body = str(int(np.rint(mean[idx])))
    if idx in textbf:
        return "\\textbf{" + body + "} " + "& "
    return body + " " + "& "
# Which LaTeX table to emit is selected by the first CLI argument.
table_type = sys.argv[1]
if table_type == "nll_error":
    # Mean NLL and classification error per dataset, best ties in bold.
    print("===NLL and Error Table===")
    for name in datasets:
        values = aggregate_results(name)
        table = convert_name[name] + " & "
        values_nll = np.array([values[i]["nll"] for i in range(len(values))])
        values_error = np.array([values[i]["error"] for i in range(len(values))])
        table += make_table_entry(values_nll, "nll", round=3)
        # Error is reported as a percentage.
        table += make_table_entry(values_error*100, "error", round=1)
        print(table.rstrip("& ") + " \\\\")
if table_type == "prr_auc":
    # PRR (error rejection) and OOD AUC-ROC, split into total (TU) and
    # knowledge (KU) uncertainty rows per dataset.
    print("===PRR and AUC-ROC Table===")
    for name in datasets:
        values = aggregate_results(name)
        prr_TU = np.array([values[i]["TU_prr"] for i in range(len(values))])
        prr_KU = np.array([values[i]["KU_prr"] for i in range(len(values))])
        prr = np.concatenate((prr_TU, prr_KU), axis=0)
        textbf_prr = compute_best(prr, minimize=False)
        auc_TU = np.array([values[i]["TU_auc"] for i in range(len(values))])
        auc_KU = np.array([values[i]["KU_auc"] for i in range(len(values))])
        auc = 100*np.concatenate((auc_TU, auc_KU), axis=0)
        textbf_auc = compute_best(auc, minimize=False)
        num = len(auc_TU)
        table = "\multirow{2}{*} {" + convert_name[name] + "} & TU & "
        for idx in range(num):
            table += make_table_element(prr, textbf_prr, idx)
        for idx in range(num):
            table += make_table_element(auc, textbf_auc, idx)
        print(table.rstrip("& ") + " \\\\")
        # Second row of the multirow: knowledge-uncertainty entries.
        table = " & KU & "
        for idx in range(num, 2*num):
            table += make_table_element(prr, textbf_prr, idx)
        for idx in range(num, 2*num):
            table += make_table_element(auc, textbf_auc, idx)
        print(table.rstrip("& ") + " \\\\")
        print("\midrule")
if table_type == "rf_nll_error":
    # Same as nll_error but comparing SGLB against a random forest baseline.
    print("===Comparison with random forest, NLL and Error===")
    for name in datasets:
        values = aggregate_results(name, algorithms=["sglb-fixed", "rf"], modes=["single", "ens"])
        table = convert_name[name] + " & "
        values_nll = np.array([values[i]["nll"] for i in range(len(values))])
        values_error = np.array([values[i]["error"] for i in range(len(values))])
        table += make_table_entry(values_nll, "nll", round=3)
        table += make_table_entry(values_error*100, "error", round=1)
        print(table.rstrip("& ") + " \\\\")
if table_type == "rf_prr_auc":
    # Same as prr_auc but comparing SGLB against a random forest baseline.
    print("===Comparison with random forest, PRR and AUC-ROC===")
    for name in datasets:
        values = aggregate_results(name, algorithms=["sglb-fixed", "rf"], modes=["virt", "ens"])
        prr_TU = np.array([values[i]["TU_prr"] for i in range(len(values))])
        prr_KU = np.array([values[i]["KU_prr"] for i in range(len(values))])
        prr = np.concatenate((prr_TU, prr_KU), axis=0)
        textbf_prr = compute_best(prr, minimize=False)
        auc_TU = np.array([values[i]["TU_auc"] for i in range(len(values))])
        auc_KU = np.array([values[i]["KU_auc"] for i in range(len(values))])
        auc = 100*np.concatenate((auc_TU, auc_KU), axis=0)
        textbf_auc = compute_best(auc, minimize=False)
        num = len(auc_TU)
        table = "\multirow{2}{*} {" + convert_name[name] + "} & TU & "
        for idx in range(num):
            table += make_table_element(prr, textbf_prr, idx)
        for idx in range(num):
            table += make_table_element(auc, textbf_auc, idx)
        print(table.rstrip("& ") + " \\\\")
        table = " & KU & "
        for idx in range(num, 2*num):
            table += make_table_element(prr, textbf_prr, idx)
        for idx in range(num, 2*num):
            table += make_table_element(auc, textbf_auc, idx)
        print(table.rstrip("& ") + " \\\\")
        print("\midrule")
|
{"hexsha": "e0d88e57eb435df59c2bb629273f86b7ebefc302", "size": 14638, "ext": "py", "lang": "Python", "max_stars_repo_path": "aggregate_results_classification.py", "max_stars_repo_name": "yandex-research/GBDT-uncertainty", "max_stars_repo_head_hexsha": "339264ee82c1ec2b22d4200d3b9c18fcce56bb0d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2021-03-31T07:35:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T12:56:20.000Z", "max_issues_repo_path": "aggregate_results_classification.py", "max_issues_repo_name": "yandex-research/GBDT-uncertainty", "max_issues_repo_head_hexsha": "339264ee82c1ec2b22d4200d3b9c18fcce56bb0d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aggregate_results_classification.py", "max_forks_repo_name": "yandex-research/GBDT-uncertainty", "max_forks_repo_head_hexsha": "339264ee82c1ec2b22d4200d3b9c18fcce56bb0d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-11-15T12:24:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T14:23:39.000Z", "avg_line_length": 38.8275862069, "max_line_length": 124, "alphanum_fraction": 0.5457712802, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3421}
|
# ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""
Convolution layer tests
"""
from builtins import zip
import numpy as np
import pytest
from neon import NervanaObject, logger as neon_logger
from neon.layers import Sequential, Conv, Pooling, MergeBroadcast, Affine
from neon.initializers.initializer import Gaussian, Constant
from neon.transforms import Rectlin, Softmax
from neon.layers.container import DeltasTree
from utils import allclose_with_out
# Shared layer hyperparameters: Gaussian weight init, ReLU activation, zero bias.
init1 = Gaussian(scale=0.01)
relu = Rectlin()
bias = Constant(0)
common = dict(activation=relu, init=init1, bias=bias)
# Same as `common` plus padding=1 (used for the 3x3 convolutions).
commonp1 = dict(activation=relu, init=init1, bias=bias, padding=1)
# Stem settings: padding=3, stride=2 (used for the initial 7x7 convolution).
commonp3s2 = dict(activation=relu, init=init1, bias=bias, padding=3, strides=2)
# 3x3 pooling with stride 1 / pad 1 (spatial size preserved).
pool3s1p1 = dict(fshape=3, padding=1, strides=1)
batch_size = 64
def fshape(rs, k):
    """Return a square conv filter shape (R, S, K) with R == S == rs."""
    side = rs
    return (side, side, k)
def inception(kvals, name="i"):
    """Build an Inception-style module merged along the depth dimension.

    `kvals` is a triple of per-branch filter counts: (1x1 branch,
    1x1->3x3 branch, pooling branch with optional 1x1 projection).
    An empty count disables the corresponding optional piece.
    """
    (k1, k2, k3) = kvals
    branch1 = []
    if k1[0]:
        branch1.append(Sequential([Conv(fshape(1, k1[0]), **common)]))
    branch2 = [Sequential([Conv(fshape(1, k2[0]), **common),
                           Conv(fshape(3, k2[1]), **commonp1)])]
    pool_layers = [Pooling(op=k3[0], **pool3s1p1)]
    if k3[1]:
        pool_layers.append(Conv(fshape(1, k3[1]), **common))
    branch3 = [Sequential(pool_layers)]
    return [MergeBroadcast(layers=branch1 + branch2 + branch3, merge="depth")]
def inception_bare(ref_module, kvals, name="i"):
    """Rebuild the three inception branches as bare layer lists.

    Weights and biases are copied from the already-initialized reference
    module so the bare branches compute the same function.
    """
    (k1, k2, k3) = kvals

    branch1 = Sequential([Conv(fshape(1, k1[0]), **common)] if k1[0] else [])
    branch2 = Sequential([Conv(fshape(1, k2[0]), **common),
                          Conv(fshape(3, k2[1]), **commonp1)])
    branch3 = Sequential([Pooling(op=k3[0], **pool3s1p1)] +
                         ([Conv(fshape(1, k3[1]), **common)] if k3[1] else []))

    def _copy_params(dst, src):
        # Copy weights and biases layer-by-layer from the reference branch.
        for ll, lr in zip(dst.layers, src.layers):
            if ll.has_params:
                ll.set_params({'params': {'W': lr.W.get(),
                                          'weight_bias': lr.weight_bias.get()}})

    (ref1, ref2, ref3) = ref_module[0].layers
    if k1[0]:
        _copy_params(branch1, ref1)
    _copy_params(branch2, ref2)
    if k3[1]:
        _copy_params(branch3, ref3)
    return (branch1.layers, branch2.layers, branch3.layers)
def main_branch():
    """Network stem: 7x7 and 3x3 conv stages, each followed by max pooling."""
    layers = [Conv(fshape(7, 64), **commonp3s2)]
    layers.append(Pooling(fshape=3, strides=2, padding=1, op="max"))
    layers.append(Conv(fshape(3, 192), **commonp1))
    layers.append(Pooling(fshape=3, strides=2, padding=1, op="max"))
    return layers
def top_branch():
    """Classifier head: global average pooling into a 100-way softmax layer."""
    head = [
        Pooling(fshape=7, strides=1, op="avg"),
        Affine(nout=100, init=init1, activation=Softmax(), bias=bias),
    ]
    return head
@pytest.mark.hasgpu
def test_branch_model(backend_gpu):
    """GPU check that a MergeBroadcast inception module matches a manual
    reference built from the same weights, in both fprop and bprop."""
    np.random.seed(0)
    be = NervanaObject.be
    be.bsz = 64
    # Full model: stem + inception module + classifier head.
    main1 = main_branch()
    i1 = inception([(32,), (32, 32), ('max', 16)])
    top = top_branch()
    neon_layer = Sequential(main1 + i1 + top)
    inshape = (4, 224, 224)
    insize = np.prod(inshape)
    inpa = np.random.random((insize, batch_size))
    neon_layer.configure(inshape)
    inp = neon_layer.be.array(inpa)
    neon_layer.allocate()
    neon_logger.display(neon_layer.nested_str())
    neon_layer.layers[0].prev_layer = True
    neon_layer.allocate_deltas()
    neon_out = neon_layer.fprop(inp).get()
    # Now make the reference pathways:
    main_trunk2 = Sequential(main_branch())
    main_trunk2.configure(inshape)
    main2 = main_trunk2.layers
    main2[0].prev_layer = True
    main2[0].deltas = be.iobuf(inshape)
    (b1, b2, b3) = inception_bare(i1, [(32,), (32, 32), ('max', 16)])
    for bb in (b1, b2, b3):
        oshape = inshape
        for ll in main2 + bb:
            oshape = ll.configure(oshape)
    # Copy the trained stem weights into the reference trunk.
    main1_trunk = neon_layer.layers[:6]
    for ll, lo in zip(main2, main1_trunk):
        if ll.has_params:
            ll.set_params({'params': {'W': lo.W.get(), 'weight_bias': lo.weight_bias.get()}})
        ll.allocate()
        temp_buff = DeltasTree()
        ll.allocate_deltas(temp_buff)
        temp_buff.allocate_buffers()
        ll.set_deltas(temp_buff)
    for bb in (b1, b2, b3):
        for ll in bb:
            ll.allocate()
            temp_buff = DeltasTree()
            ll.allocate_deltas(temp_buff)
            temp_buff.allocate_buffers()
            ll.set_deltas(temp_buff)
    # Create the combined output buffer
    merge_output = be.empty_like(neon_layer.layers[6].outputs)
    x = inp
    for ll in main2:
        x = ll.fprop(x)
    # Stack the three branch outputs depth-wise, as MergeBroadcast would.
    start = 0
    for bb in (b1, b2, b3):
        xb = x
        for ll in bb:
            xb = ll.fprop(xb)
        end = start + xb.shape[0]
        merge_output[start:end] = xb
        start = end
    x = merge_output
    top_trunk = Sequential(top).layers
    for ll in top_trunk:
        x = ll.fprop(x)
    neon_out_ref = x.get()
    assert allclose_with_out(neon_out, neon_out_ref, rtol=0)
    neon_logger.display("Beginning Back prop")
    erra = np.random.random(neon_out.shape)
    err = be.array(erra)
    for ll in reversed(neon_layer.layers[6:]):
        err = ll.bprop(err)
    neon_deltas = err.get()
    for bb, errb in zip((b1, b2, b3), neon_layer.layers[6].error_views):
        for ll in reversed(bb):
            errb = ll.bprop(errb)
    # Now sum up the deltas at the root of the branch layer and compare
    ref_deltas = be.zeros_like(b1[0].deltas)
    ref_deltas[:] = b3[0].deltas + b2[0].deltas + b1[0].deltas
    neon_ref_deltas = ref_deltas.get()
    assert allclose_with_out(neon_deltas, neon_ref_deltas, rtol=0)
@pytest.mark.hasgpu
def test_branch_model_fork(backend_gpu):
    """GPU check of a forked (Tree) model with an auxiliary output branch:
    both outputs and the alpha-weighted summed deltas must match a manual
    reference built from the same weights."""
    from neon.layers import BranchNode, Tree
    np.random.seed(0)
    be = NervanaObject.be
    if be.gpu_memory_size < 6.1 * 1024 * 1024 * 1024:
        pytest.skip(msg='Test requires more than 6.1GB')
    be.bsz = 64
    bnode = BranchNode()
    i1 = inception([(32,), (32, 32), ('max', 16)])
    top1 = top_branch()
    top2 = top_branch()
    # Main path goes through the inception module; auxiliary path forks at bnode.
    p1 = Sequential(main_branch() + [bnode, i1] + top1)
    p2 = [bnode] + top2
    alpha2 = 0.3
    neon_layer = Tree([p1, p2], alphas=[1.0, alpha2])
    inshape = (4, 224, 224)
    insize = np.prod(inshape)
    inpa = np.random.random((insize, batch_size))
    neon_layer.configure(inshape)
    inp = neon_layer.be.array(inpa)
    neon_layer.allocate()
    neon_layer.layers[0].layers[0].prev_layer = True
    neon_layer.allocate_deltas()
    neon_out_dev = neon_layer.fprop(inp)
    neon_out = [d.get() for d in neon_out_dev]
    # Now make the reference pathways:
    main_trunk2 = Sequential(main_branch())
    main_trunk2.configure(inshape)
    main2 = main_trunk2.layers
    main2[0].prev_layer = True
    main2[0].deltas = be.iobuf(inshape)
    branch2 = Sequential(top_branch())
    lbranch2 = branch2.layers
    (b1, b2, b3) = inception_bare(i1, [(32,), (32, 32), ('max', 16)])
    for bb in (b1, b2, b3, lbranch2):
        oshape = inshape
        for ll in main2 + bb:
            oshape = ll.configure(oshape)
    # Copy the trained stem weights into the reference trunk.
    main1_trunk = neon_layer.layers[0].layers[:6]
    for ll, lo in zip(main2, main1_trunk):
        if ll.has_params:
            ll.set_params({'params': {'W': lo.W.get(), 'weight_bias': lo.weight_bias.get()}})
        ll.allocate()
        temp_deltas = DeltasTree()
        temp_deltas.proc_layer(ll)
        temp_deltas.allocate_buffers()
        ll.set_deltas(temp_deltas)
    # The auxiliary branch only copies the affine weights.
    for ll, lo in zip(lbranch2, neon_layer.layers[1].layers[1:]):
        if ll.has_params:
            ll.set_params({'params': {'W': lo.W.get()}})
    for bb in (b1, b2, b3, lbranch2):
        for ll in bb:
            ll.allocate()
            temp_deltas = DeltasTree()
            temp_deltas.proc_layer(ll)
            temp_deltas.allocate_buffers()
            ll.set_deltas(temp_deltas)
    # Create the combined output buffer
    merge_output = be.empty_like(neon_layer.layers[0].layers[7].outputs)
    x = inp
    for ll in main2:
        x = ll.fprop(x)
    main2_out = x
    start = 0
    for bb in (b1, b2, b3):
        xb = main2_out
        for ll in bb:
            xb = ll.fprop(xb)
        end = start + xb.shape[0]
        merge_output[start:end] = xb
        start = end
    x = merge_output
    top_trunk = Sequential(top1).layers
    for ll in top_trunk:
        x = ll.fprop(x)
    neon_out_ref = x.get()
    assert allclose_with_out(neon_out_ref, neon_out[0], rtol=0)
    # Now do second branch
    neon_out_ref2 = branch2.fprop(main2_out).get()
    assert allclose_with_out(neon_out_ref2, neon_out[1])
    neon_logger.display("Beginning Back prop")
    erra = [np.random.random(d.shape) for d in neon_out]
    err = [be.array(d) for d in erra]
    neon_layer.layers[0].layers[0].deltas = be.iobuf(inshape)
    neon_layer.bprop(err)
    bottom_neon_deltas = neon_layer.layers[0].layers[1].deltas.get()
    middle_neon_deltas = neon_layer.layers[1].layers[1].deltas.get()
    err0 = err[0]
    for ll in reversed(top_trunk):
        err0 = ll.bprop(err0)
    err1 = err[1]
    for ll in reversed(lbranch2):
        err1 = ll.bprop(err1)
    for bb, errb in zip((b1, b2, b3), neon_layer.layers[0].layers[-5].error_views):
        for ll in reversed(bb):
            errb = ll.bprop(errb)
    # Now sum up the deltas at the root of the branch layer and compare;
    # the auxiliary branch contributes with weight alpha2.
    ref_deltas = be.zeros_like(b1[0].deltas)
    ref_deltas[:] = alpha2 * lbranch2[0].deltas
    ref_deltas[:] = ref_deltas + b3[0].deltas + b2[0].deltas + b1[0].deltas
    neon_ref_deltas = ref_deltas.get()
    assert allclose_with_out(middle_neon_deltas, neon_ref_deltas, rtol=0)
    x = ref_deltas
    main2[0].deltas = be.iobuf(inshape)
    for ll in reversed(main2):
        x = ll.bprop(x)
    bottom_neon_ref_deltas = main2[1].deltas.get()
    assert allclose_with_out(bottom_neon_deltas, bottom_neon_ref_deltas, rtol=0)
@pytest.mark.unsupported
@pytest.mark.skip(reason="Not supported for CPU")
def test_branch_model_mkl(backend_mkl):
    """MKL variant of test_branch_model (smaller batch, different layer
    offsets); currently skipped as unsupported on CPU backends."""
    np.random.seed(0)
    be = NervanaObject.be
    be.bsz = 32
    main1 = main_branch()
    i1 = inception([(32,), (32, 32), ('max', 16)])
    top = top_branch()
    neon_layer = Sequential(main1 + i1 + top)
    inshape = (4, 224, 224)
    insize = np.prod(inshape)
    inpa = np.random.random((insize, batch_size))
    neon_layer.configure(inshape)
    inp = neon_layer.be.array(inpa)
    neon_layer.allocate()
    neon_logger.display(neon_layer.nested_str())
    neon_layer.layers[0].prev_layer = True
    neon_layer.allocate_deltas()
    neon_out = neon_layer.fprop(inp).get()
    # Now make the reference pathways:
    main_trunk2 = Sequential(main_branch())
    main_trunk2.configure(inshape)
    main2 = main_trunk2.layers
    main2[0].prev_layer = True
    main2[0].deltas = be.iobuf(inshape)
    (b1, b2, b3) = inception_bare(i1, [(32,), (32, 32), ('max', 16)])
    for bb in (b1, b2, b3):
        oshape = inshape
        for ll in main2 + bb:
            oshape = ll.configure(oshape)
    # Copy the stem weights into the reference trunk (weights only here).
    main1_trunk = neon_layer.layers[:8]
    for ll, lo in zip(main2, main1_trunk):
        if ll.has_params:
            ll.set_params({'params': {'W': lo.W.get()}})
        ll.allocate()
        temp_buff = DeltasTree()
        ll.allocate_deltas(temp_buff)
        temp_buff.allocate_buffers()
        ll.set_deltas(temp_buff)
    for bb in (b1, b2, b3):
        for ll in bb:
            ll.allocate()
            temp_buff = DeltasTree()
            ll.allocate_deltas(temp_buff)
            temp_buff.allocate_buffers()
            ll.set_deltas(temp_buff)
    # Create the combined output buffer
    merge_output = be.empty_like(neon_layer.layers[8].outputs)
    x = inp
    for ll in main2:
        x = ll.fprop(x)
    start = 0
    for bb in (b1, b2, b3):
        xb = x
        for ll in bb:
            xb = ll.fprop(xb)
        end = start + xb.shape[0]
        merge_output[start:end] = xb
        start = end
    x = merge_output
    top_trunk = Sequential(top).layers
    for ll in top_trunk:
        x = ll.fprop(x)
    neon_out_ref = x.get()
    assert allclose_with_out(neon_out, neon_out_ref, rtol=0)
    neon_logger.display("Beginning Back prop")
    erra = np.random.random(neon_out.shape)
    err = be.array(erra)
    for ll in reversed(neon_layer.layers[8:]):
        err = ll.bprop(err)
    neon_deltas = err.get()
    for bb, errb in zip((b1, b2, b3), neon_layer.layers[8].error_views):
        for ll in reversed(bb):
            errb = ll.bprop(errb)
    # Now sum up the deltas at the root of the branch layer and compare
    ref_deltas = be.zeros_like(b1[0].deltas)
    ref_deltas[:] = b3[0].deltas + b2[0].deltas + b1[0].deltas
    neon_ref_deltas = ref_deltas.get()
    assert allclose_with_out(neon_deltas, neon_ref_deltas, rtol=0)
@pytest.mark.unsupported
@pytest.mark.skip(reason="Not supported for CPU")
def test_branch_model_fork_mkl(backend_mkl):
    """MKL variant of test_branch_model_fork (smaller batch, different layer
    offsets); currently skipped as unsupported on CPU backends."""
    from neon.layers import BranchNode, Tree
    np.random.seed(0)
    be = NervanaObject.be
    be.bsz = 32
    bnode = BranchNode()
    i1 = inception([(32,), (32, 32), ('max', 16)])
    top1 = top_branch()
    top2 = top_branch()
    # Main path goes through the inception module; auxiliary path forks at bnode.
    p1 = Sequential(main_branch() + [bnode, i1] + top1)
    p2 = [bnode] + top2
    alpha2 = 0.3
    neon_layer = Tree([p1, p2], alphas=[1.0, alpha2])
    inshape = (4, 224, 224)
    insize = np.prod(inshape)
    inpa = np.random.random((insize, batch_size))
    neon_layer.configure(inshape)
    inp = neon_layer.be.array(inpa)
    neon_layer.allocate()
    neon_layer.layers[0].layers[0].prev_layer = True
    neon_layer.allocate_deltas()
    neon_out_dev = neon_layer.fprop(inp)
    neon_out = [d.get() for d in neon_out_dev]
    # Now make the reference pathways:
    main_trunk2 = Sequential(main_branch())
    main_trunk2.configure(inshape)
    main2 = main_trunk2.layers
    main2[0].prev_layer = True
    main2[0].deltas = be.iobuf(inshape)
    branch2 = Sequential(top_branch())
    lbranch2 = branch2.layers
    (b1, b2, b3) = inception_bare(i1, [(32,), (32, 32), ('max', 16)])
    for bb in (b1, b2, b3, lbranch2):
        oshape = inshape
        for ll in main2 + bb:
            oshape = ll.configure(oshape)
    # Copy the stem weights into the reference trunk (weights only here).
    main1_trunk = neon_layer.layers[0].layers[:8]
    for ll, lo in zip(main2, main1_trunk):
        if ll.has_params:
            ll.set_params({'params': {'W': lo.W.get()}})
        ll.allocate()
        temp_deltas = DeltasTree()
        temp_deltas.proc_layer(ll)
        temp_deltas.allocate_buffers()
        ll.set_deltas(temp_deltas)
    for ll, lo in zip(lbranch2, neon_layer.layers[1].layers[1:]):
        if ll.has_params:
            ll.set_params({'params': {'W': lo.W.get()}})
    for bb in (b1, b2, b3, lbranch2):
        for ll in bb:
            ll.allocate()
            temp_deltas = DeltasTree()
            temp_deltas.proc_layer(ll)
            temp_deltas.allocate_buffers()
            ll.set_deltas(temp_deltas)
    # Create the combined output buffer
    merge_output = be.empty_like(neon_layer.layers[0].layers[9].outputs)
    x = inp
    for ll in main2:
        x = ll.fprop(x)
    main2_out = x
    start = 0
    for bb in (b1, b2, b3):
        xb = main2_out
        for ll in bb:
            xb = ll.fprop(xb)
        end = start + xb.shape[0]
        merge_output[start:end] = xb
        start = end
    x = merge_output
    top_trunk = Sequential(top1).layers
    for ll in top_trunk:
        x = ll.fprop(x)
    neon_out_ref = x.get()
    assert allclose_with_out(neon_out_ref, neon_out[0], rtol=0)
    # Now do second branch
    neon_out_ref2 = branch2.fprop(main2_out).get()
    assert allclose_with_out(neon_out_ref2, neon_out[1])
    neon_logger.display("Beginning Back prop")
    erra = [np.random.random(d.shape) for d in neon_out]
    err = [be.array(d) for d in erra]
    neon_layer.layers[0].layers[0].deltas = be.iobuf(inshape)
    neon_layer.bprop(err)
    bottom_neon_deltas = neon_layer.layers[0].layers[1].deltas.get()
    middle_neon_deltas = neon_layer.layers[1].layers[1].deltas.get()
    err0 = err[0]
    for ll in reversed(top_trunk):
        err0 = ll.bprop(err0)
    err1 = err[1]
    for ll in reversed(lbranch2):
        err1 = ll.bprop(err1)
    for bb, errb in zip((b1, b2, b3), neon_layer.layers[0].layers[-5].error_views):
        for ll in reversed(bb):
            errb = ll.bprop(errb)
    # Now sum up the deltas at the root of the branch layer and compare;
    # the auxiliary branch contributes with weight alpha2.
    ref_deltas = be.zeros_like(b1[0].deltas)
    ref_deltas[:] = alpha2 * lbranch2[0].deltas
    ref_deltas[:] = ref_deltas + b3[0].deltas + b2[0].deltas + b1[0].deltas
    neon_ref_deltas = ref_deltas.get()
    assert allclose_with_out(middle_neon_deltas, neon_ref_deltas, rtol=0)
    x = ref_deltas
    main2[0].deltas = be.iobuf(inshape)
    for ll in reversed(main2):
        x = ll.bprop(x)
    bottom_neon_ref_deltas = main2[1].deltas.get()
    assert allclose_with_out(bottom_neon_deltas, bottom_neon_ref_deltas, rtol=0)
@pytest.mark.unsupported
@pytest.mark.skip(reason="Not supported for CPU")
def test_branch_model_cpu(backend_cpu64):
    """Check a Sequential model containing a merge-broadcast (inception-style)
    block against a hand-assembled reference pathway on the CPU backend.

    The reference is built by re-running the main trunk, each inception
    branch, and the top trunk separately -- with weights copied from the
    model under test -- then asserting that both the forward outputs and
    the deltas summed at the branch root match the containerized model.
    """
    np.random.seed(0)  # deterministic weights/input for a reproducible comparison
    be = NervanaObject.be
    be.bsz = 32
    main1 = main_branch()
    # inception(...) builds the merge-broadcast block from the given branch
    # config; presumably (1x1), (reduce, conv) and (pool, projection)
    # branches -- TODO confirm against the inception() helper.
    i1 = inception([(32,), (32, 32), ('max', 16)])
    top = top_branch()
    neon_layer = Sequential(main1 + i1 + top)

    inshape = (4, 224, 224)
    insize = np.prod(inshape)
    # NOTE(review): batch_size is a module-level constant not visible in this
    # chunk; presumably it matches be.bsz -- confirm.
    inpa = np.random.random((insize, batch_size))
    neon_layer.configure(inshape)
    inp = neon_layer.be.array(inpa)

    neon_layer.allocate()
    neon_logger.display(neon_layer.nested_str())
    # Mark the first layer as having a predecessor so delta buffers get set up.
    neon_layer.layers[0].prev_layer = True
    neon_layer.allocate_deltas()
    neon_out = neon_layer.fprop(inp).get()

    # Now make the reference pathways:
    main_trunk2 = Sequential(main_branch())
    main_trunk2.configure(inshape)
    main2 = main_trunk2.layers
    main2[0].prev_layer = True
    main2[0].deltas = be.iobuf(inshape)
    # inception_bare returns the three branch layer lists without the
    # merge-broadcast container wrapper.
    (b1, b2, b3) = inception_bare(i1, [(32,), (32, 32), ('max', 16)])
    # Configure each branch on top of the shared main-trunk output shape.
    for bb in (b1, b2, b3):
        oshape = inshape
        for ll in main2 + bb:
            oshape = ll.configure(oshape)
    # Copy weights from the model under test into the reference trunk so
    # both pathways compute with identical parameters.
    main1_trunk = neon_layer.layers[:8]
    for ll, lo in zip(main2, main1_trunk):
        if ll.has_params:
            ll.set_params({'params': {'W': lo.W.get()}})
        ll.allocate()
        temp_buff = DeltasTree()
        ll.allocate_deltas(temp_buff)
        temp_buff.allocate_buffers()
        ll.set_deltas(temp_buff)
    for bb in (b1, b2, b3):
        for ll in bb:
            ll.allocate()
            temp_buff = DeltasTree()
            ll.allocate_deltas(temp_buff)
            temp_buff.allocate_buffers()
            ll.set_deltas(temp_buff)

    # Create the combined output buffer
    merge_output = be.empty_like(neon_layer.layers[8].outputs)
    x = inp
    for ll in main2:
        x = ll.fprop(x)
    # Run each branch and stack its output rows into the merged buffer,
    # mirroring what the merge-broadcast container does internally.
    start = 0
    for bb in (b1, b2, b3):
        xb = x
        for ll in bb:
            xb = ll.fprop(xb)
        end = start + xb.shape[0]
        merge_output[start:end] = xb
        start = end
    x = merge_output
    top_trunk = Sequential(top).layers
    for ll in top_trunk:
        x = ll.fprop(x)
    neon_out_ref = x.get()
    # Forward outputs of model and reference must agree (rtol=0: absolute
    # tolerance only, as implemented by allclose_with_out).
    assert allclose_with_out(neon_out, neon_out_ref, rtol=0)

    neon_logger.display("Beginning Back prop")
    erra = np.random.random(neon_out.shape)
    err = be.array(erra)
    # Backprop the model under test from the top down to (and including)
    # the merge-broadcast layer at index 8; err then holds its deltas.
    for ll in reversed(neon_layer.layers[8:]):
        err = ll.bprop(err)
    neon_deltas = err.get()
    # Backprop each reference branch from its slice of the model's error views.
    for bb, errb in zip((b1, b2, b3), neon_layer.layers[8].error_views):
        for ll in reversed(bb):
            errb = ll.bprop(errb)
    # Now sum up the deltas at the root of the branch layer and compare
    ref_deltas = be.zeros_like(b1[0].deltas)
    ref_deltas[:] = b3[0].deltas + b2[0].deltas + b1[0].deltas
    neon_ref_deltas = ref_deltas.get()
    assert allclose_with_out(neon_deltas, neon_ref_deltas, rtol=0)
@pytest.mark.unsupported
@pytest.mark.skip(reason="Not supported for CPU")
def test_branch_model_fork_cpu(backend_cpu64):
    """Check a Tree model that forks at a BranchNode against hand-assembled
    reference pathways on the CPU backend.

    Pathway p1 runs main trunk -> inception block -> top branch; pathway p2
    forks at the BranchNode into a second top branch, weighted by alpha2.
    Forward outputs of both pathways and the (alpha-weighted) deltas summed
    at the fork are compared against references built layer by layer.
    """
    from neon.layers import BranchNode, Tree
    np.random.seed(0)  # deterministic weights/input for a reproducible comparison
    be = NervanaObject.be
    be.bsz = 32
    bnode = BranchNode()
    i1 = inception([(32,), (32, 32), ('max', 16)])
    top1 = top_branch()
    top2 = top_branch()
    p1 = Sequential(main_branch() + [bnode, i1] + top1)
    p2 = [bnode] + top2
    # alpha2 scales the error contribution of the second pathway during bprop.
    alpha2 = 0.3
    neon_layer = Tree([p1, p2], alphas=[1.0, alpha2])

    inshape = (4, 224, 224)
    insize = np.prod(inshape)
    # NOTE(review): batch_size is a module-level constant not visible in this
    # chunk; presumably it matches be.bsz -- confirm.
    inpa = np.random.random((insize, batch_size))
    neon_layer.configure(inshape)
    inp = neon_layer.be.array(inpa)
    neon_layer.allocate()
    # Mark the first layer of the first pathway as having a predecessor so
    # delta buffers get set up.
    neon_layer.layers[0].layers[0].prev_layer = True
    neon_layer.allocate_deltas()
    # Tree.fprop returns one output tensor per pathway.
    neon_out_dev = neon_layer.fprop(inp)
    neon_out = [d.get() for d in neon_out_dev]

    # Now make the reference pathways:
    main_trunk2 = Sequential(main_branch())
    main_trunk2.configure(inshape)
    main2 = main_trunk2.layers
    main2[0].prev_layer = True
    main2[0].deltas = be.iobuf(inshape)
    # Reference for the second (forked) pathway's top branch.
    branch2 = Sequential(top_branch())
    lbranch2 = branch2.layers
    # inception_bare returns the three branch layer lists without the
    # merge-broadcast container wrapper.
    (b1, b2, b3) = inception_bare(i1, [(32,), (32, 32), ('max', 16)])
    # Configure each branch (and the fork's top branch) on top of the shared
    # main-trunk output shape.
    for bb in (b1, b2, b3, lbranch2):
        oshape = inshape
        for ll in main2 + bb:
            oshape = ll.configure(oshape)
    # Copy weights from the model under test into the reference trunk so
    # both pathways compute with identical parameters.
    main1_trunk = neon_layer.layers[0].layers[:8]
    for ll, lo in zip(main2, main1_trunk):
        if ll.has_params:
            ll.set_params({'params': {'W': lo.W.get()}})
        ll.allocate()
        temp_deltas = DeltasTree()
        temp_deltas.proc_layer(ll)
        temp_deltas.allocate_buffers()
        ll.set_deltas(temp_deltas)
    # Second pathway skips its first layer (the shared BranchNode) when
    # copying weights into the reference top branch.
    for ll, lo in zip(lbranch2, neon_layer.layers[1].layers[1:]):
        if ll.has_params:
            ll.set_params({'params': {'W': lo.W.get()}})
    for bb in (b1, b2, b3, lbranch2):
        for ll in bb:
            ll.allocate()
            temp_deltas = DeltasTree()
            temp_deltas.proc_layer(ll)
            temp_deltas.allocate_buffers()
            ll.set_deltas(temp_deltas)

    # Create the combined output buffer
    merge_output = be.empty_like(neon_layer.layers[0].layers[9].outputs)
    x = inp
    for ll in main2:
        x = ll.fprop(x)
    main2_out = x
    # Run each inception branch and stack its output rows into the merged
    # buffer, mirroring what the merge-broadcast container does internally.
    start = 0
    for bb in (b1, b2, b3):
        xb = main2_out
        for ll in bb:
            xb = ll.fprop(xb)
        end = start + xb.shape[0]
        merge_output[start:end] = xb
        start = end
    x = merge_output
    top_trunk = Sequential(top1).layers
    for ll in top_trunk:
        x = ll.fprop(x)
    neon_out_ref = x.get()
    # First pathway's forward output must agree (rtol=0: absolute tolerance
    # only, as implemented by allclose_with_out).
    assert allclose_with_out(neon_out_ref, neon_out[0], rtol=0)
    # Now do second branch
    neon_out_ref2 = branch2.fprop(main2_out).get()
    assert allclose_with_out(neon_out_ref2, neon_out[1])

    neon_logger.display("Beginning Back prop")
    erra = [np.random.random(d.shape) for d in neon_out]
    err = [be.array(d) for d in erra]
    neon_layer.layers[0].layers[0].deltas = be.iobuf(inshape)
    neon_layer.bprop(err)
    # Deltas entering the trunk (below the first layer) and entering the
    # fork point from the model under test.
    bottom_neon_deltas = neon_layer.layers[0].layers[1].deltas.get()
    middle_neon_deltas = neon_layer.layers[1].layers[1].deltas.get()
    # Backprop each reference pathway independently.
    err0 = err[0]
    for ll in reversed(top_trunk):
        err0 = ll.bprop(err0)
    err1 = err[1]
    for ll in reversed(lbranch2):
        err1 = ll.bprop(err1)
    for bb, errb in zip((b1, b2, b3), neon_layer.layers[0].layers[-5].error_views):
        for ll in reversed(bb):
            errb = ll.bprop(errb)
    # Now sum up the deltas at the root of the branch layer and compare
    # (second pathway's contribution is scaled by alpha2, matching the
    # Tree alphas given above).
    ref_deltas = be.zeros_like(b1[0].deltas)
    ref_deltas[:] = alpha2 * lbranch2[0].deltas
    ref_deltas[:] = ref_deltas + b3[0].deltas + b2[0].deltas + b1[0].deltas
    neon_ref_deltas = ref_deltas.get()
    assert allclose_with_out(middle_neon_deltas, neon_ref_deltas, rtol=0)
    # Finally continue the reference backprop through the main trunk and
    # compare deltas at the bottom of the network.
    x = ref_deltas
    main2[0].deltas = be.iobuf(inshape)
    for ll in reversed(main2):
        x = ll.bprop(x)
    bottom_neon_ref_deltas = main2[1].deltas.get()
    assert allclose_with_out(bottom_neon_deltas, bottom_neon_ref_deltas, rtol=0)
if __name__ == '__main__':
    # Direct-run entry point for debugging; test_branch_model_fork is
    # presumably defined earlier in this file (not visible in this chunk).
    # Running it here bypasses the pytest marks/fixtures -- TODO confirm it
    # is runnable standalone.
    test_branch_model_fork()
|
{"hexsha": "46267768f4bcd3f0b219748b978c0cf1355ee310", "size": 24885, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_mergebroadcast_layer.py", "max_stars_repo_name": "rsketine/neon", "max_stars_repo_head_hexsha": "a10f90546d2ddae68c3671f59ba9b513158a91f1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4415, "max_stars_repo_stars_event_min_datetime": "2015-05-04T06:00:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T13:38:45.000Z", "max_issues_repo_path": "tests/test_mergebroadcast_layer.py", "max_issues_repo_name": "EquifAI/neon", "max_issues_repo_head_hexsha": "a10f90546d2ddae68c3671f59ba9b513158a91f1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 446, "max_issues_repo_issues_event_min_datetime": "2015-05-06T20:27:29.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-29T03:41:06.000Z", "max_forks_repo_path": "tests/test_mergebroadcast_layer.py", "max_forks_repo_name": "EquifAI/neon", "max_forks_repo_head_hexsha": "a10f90546d2ddae68c3671f59ba9b513158a91f1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1060, "max_forks_repo_forks_event_min_datetime": "2015-05-06T19:03:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-13T07:43:01.000Z", "avg_line_length": 30.5712530713, "max_line_length": 97, "alphanum_fraction": 0.6235081374, "include": true, "reason": "import numpy", "num_tokens": 7310}
|
[STATEMENT]
theorem winding_number_cindex_pathE:
fixes g::"real \<Rightarrow> complex"
assumes "finite_ReZ_segments g z" and "valid_path g" "z \<notin> path_image g" and
loop: "pathfinish g = pathstart g"
shows "winding_number g z = - cindex_pathE g z / 2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
proof (rule finite_ReZ_segment_cases[OF assms(1)])
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) = Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
2. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
fix s
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) = Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
2. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
assume "s \<in> {0..<1}" "s = 0 \<or> Re (g s) = Re z"
and const:"\<forall>t\<in>{s<..<1}. Re (g t) = Re z"
and finite:"finite_ReZ_segments (subpath 0 s g) z"
[PROOF STATE]
proof (state)
this:
s \<in> {0..<1}
s = 0 \<or> Re (g s) = Re z
\<forall>t\<in>{s<..<1}. Re (g t) = Re z
finite_ReZ_segments (subpath 0 s g) z
goal (2 subgoals):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) = Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
2. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
have "Re (g 1) = Re z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Re (g 1) = Re z
[PROOF STEP]
apply(rule continuous_constant_on_closure[of "{s<..<1}" "\<lambda>t. Re(g t)"])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. continuous_on (closure {s<..<1}) (\<lambda>t. Re (g t))
2. \<And>x. x \<in> {s<..<1} \<Longrightarrow> Re (g x) = Re z
3. 1 \<in> closure {s<..<1}
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_on (closure {s<..<1}) (\<lambda>t. Re (g t))
[PROOF STEP]
using valid_path_imp_path[OF \<open>valid_path g\<close>,unfolded path_def] \<open>s\<in>{0..<1}\<close>
[PROOF STATE]
proof (prove)
using this:
continuous_on {0..1} g
s \<in> {0..<1}
goal (1 subgoal):
1. continuous_on (closure {s<..<1}) (\<lambda>t. Re (g t))
[PROOF STEP]
by (auto intro!:continuous_intros continuous_Re elim:continuous_on_subset)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x. x \<in> {s<..<1} \<Longrightarrow> Re (g x) = Re z
2. 1 \<in> closure {s<..<1}
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x_ \<in> {s<..<1} \<Longrightarrow> Re (g x_) = Re z
[PROOF STEP]
using const
[PROOF STATE]
proof (prove)
using this:
\<forall>t\<in>{s<..<1}. Re (g t) = Re z
goal (1 subgoal):
1. x_ \<in> {s<..<1} \<Longrightarrow> Re (g x_) = Re z
[PROOF STEP]
by auto
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 1 \<in> closure {s<..<1}
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 1 \<in> closure {s<..<1}
[PROOF STEP]
using \<open>s\<in>{0..<1}\<close>
[PROOF STATE]
proof (prove)
using this:
s \<in> {0..<1}
goal (1 subgoal):
1. 1 \<in> closure {s<..<1}
[PROOF STEP]
by auto
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
Re (g 1) = Re z
goal (2 subgoals):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) = Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
2. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
Re (g 1) = Re z
goal (2 subgoals):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) = Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
2. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Re (g 1) = Re z
[PROOF STEP]
have "Re (g 0) = Re z"
[PROOF STATE]
proof (prove)
using this:
Re (g 1) = Re z
goal (1 subgoal):
1. Re (g 0) = Re z
[PROOF STEP]
using loop
[PROOF STATE]
proof (prove)
using this:
Re (g 1) = Re z
pathfinish g = pathstart g
goal (1 subgoal):
1. Re (g 0) = Re z
[PROOF STEP]
unfolding path_defs
[PROOF STATE]
proof (prove)
using this:
Re (g 1) = Re z
g 1 = g 0
goal (1 subgoal):
1. Re (g 0) = Re z
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Re (g 0) = Re z
goal (2 subgoals):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) = Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
2. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
Re (g 1) = Re z
Re (g 0) = Re z
[PROOF STEP]
have "2 * Re (winding_number g z) = - cindex_pathE g z"
[PROOF STATE]
proof (prove)
using this:
Re (g 1) = Re z
Re (g 0) = Re z
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
using winding_number_cindex_pathE_aux[of g z] assms(1-3)
[PROOF STATE]
proof (prove)
using this:
Re (g 1) = Re z
Re (g 0) = Re z
\<lbrakk>finite_ReZ_segments g z; valid_path g; z \<notin> path_image g; Re (g 1) = Re z; Re (g 0) = Re z\<rbrakk> \<Longrightarrow> 2 * Re (winding_number g z) = - cindex_pathE g z
finite_ReZ_segments g z
valid_path g
z \<notin> path_image g
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
2 * Re (winding_number g z) = - cindex_pathE g z
goal (2 subgoals):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) = Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
2. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
2 * Re (winding_number g z) = - cindex_pathE g z
goal (2 subgoals):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) = Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
2. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
have "winding_number g z \<in> \<int>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. winding_number g z \<in> \<int>
[PROOF STEP]
using integer_winding_number[OF _ loop \<open>z\<notin>path_image g\<close>] valid_path_imp_path[OF \<open>valid_path g\<close>]
[PROOF STATE]
proof (prove)
using this:
path g \<Longrightarrow> winding_number g z \<in> \<int>
path g
goal (1 subgoal):
1. winding_number g z \<in> \<int>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
winding_number g z \<in> \<int>
goal (2 subgoals):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) = Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
2. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
2 * Re (winding_number g z) = - cindex_pathE g z
winding_number g z \<in> \<int>
[PROOF STEP]
show "winding_number g z = - cindex_pathE g z / 2"
[PROOF STATE]
proof (prove)
using this:
2 * Re (winding_number g z) = - cindex_pathE g z
winding_number g z \<in> \<int>
goal (1 subgoal):
1. winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
by (metis add.right_neutral complex_eq complex_is_Int_iff mult_zero_right
nonzero_mult_div_cancel_left of_real_0 zero_neq_numeral)
[PROOF STATE]
proof (state)
this:
winding_number g z = complex_of_real (- cindex_pathE g z / 2)
goal (1 subgoal):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
fix s
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
assume "s \<in> {0..<1}" "s = 0 \<or> Re (g s) = Re z"
and Re_neq:"\<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z"
and finite:"finite_ReZ_segments (subpath 0 s g) z"
[PROOF STATE]
proof (state)
this:
s \<in> {0..<1}
s = 0 \<or> Re (g s) = Re z
\<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z
finite_ReZ_segments (subpath 0 s g) z
goal (1 subgoal):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
have "path g"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. path g
[PROOF STEP]
using \<open>valid_path g\<close> valid_path_imp_path
[PROOF STATE]
proof (prove)
using this:
valid_path g
valid_path ?g \<Longrightarrow> path ?g
goal (1 subgoal):
1. path g
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
path g
goal (1 subgoal):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
let ?goal = "2 * Re (winding_number g z) = - cindex_pathE g z"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
have ?goal when "s=0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
have index_ends:"cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z"
when Re_neq:"\<forall>t\<in>{0<..<1}. Re (h t) \<noteq> Re z" and "valid_path h" for h
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z
[PROOF STEP]
define f where "f = (\<lambda>t. Im (h t - z) / Re (h t - z))"
[PROOF STATE]
proof (state)
this:
f = (\<lambda>t. Im (h t - z) / Re (h t - z))
goal (1 subgoal):
1. cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z
[PROOF STEP]
define Ri where "Ri = {x. jumpF f (at_right x) \<noteq> 0 \<and> 0 \<le> x \<and> x < 1}"
[PROOF STATE]
proof (state)
this:
Ri = {x. jumpF f (at_right x) \<noteq> 0 \<and> 0 \<le> x \<and> x < 1}
goal (1 subgoal):
1. cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z
[PROOF STEP]
define Le where "Le = {x. jumpF f (at_left x) \<noteq> 0 \<and> 0 < x \<and> x \<le> 1}"
[PROOF STATE]
proof (state)
this:
Le = {x. jumpF f (at_left x) \<noteq> 0 \<and> 0 < x \<and> x \<le> 1}
goal (1 subgoal):
1. cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z
[PROOF STEP]
have "path h"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. path h
[PROOF STEP]
using \<open>valid_path h\<close> valid_path_imp_path
[PROOF STATE]
proof (prove)
using this:
valid_path h
valid_path ?g \<Longrightarrow> path ?g
goal (1 subgoal):
1. path h
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
path h
goal (1 subgoal):
1. cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z
[PROOF STEP]
have jumpF_eq0: "jumpF f (at_left x) = 0" "jumpF f (at_right x) = 0" when "x\<in>{0<..<1}" for x
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF f (at_left x) = 0 &&& jumpF f (at_right x) = 0
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. jumpF f (at_left x) = 0
2. jumpF f (at_right x) = 0
[PROOF STEP]
have "Re (h x) \<noteq> Re z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Re (h x) \<noteq> Re z
[PROOF STEP]
using \<open>\<forall>t\<in>{0<..<1}. Re (h t) \<noteq> Re z\<close> that
[PROOF STATE]
proof (prove)
using this:
\<forall>t\<in>{0<..<1}. Re (h t) \<noteq> Re z
x \<in> {0<..<1}
goal (1 subgoal):
1. Re (h x) \<noteq> Re z
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
Re (h x) \<noteq> Re z
goal (2 subgoals):
1. jumpF f (at_left x) = 0
2. jumpF f (at_right x) = 0
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Re (h x) \<noteq> Re z
[PROOF STEP]
have "isCont f x"
[PROOF STATE]
proof (prove)
using this:
Re (h x) \<noteq> Re z
goal (1 subgoal):
1. isCont f x
[PROOF STEP]
unfolding f_def
[PROOF STATE]
proof (prove)
using this:
Re (h x) \<noteq> Re z
goal (1 subgoal):
1. isCont (\<lambda>t. Im (h t - z) / Re (h t - z)) x
[PROOF STEP]
using continuous_on_interior[OF \<open>path h\<close>[unfolded path_def]] that
[PROOF STATE]
proof (prove)
using this:
Re (h x) \<noteq> Re z
?x \<in> interior {0..1} \<Longrightarrow> isCont h ?x
x \<in> {0<..<1}
goal (1 subgoal):
1. isCont (\<lambda>t. Im (h t - z) / Re (h t - z)) x
[PROOF STEP]
by (auto intro!: continuous_intros isCont_Im isCont_Re)
[PROOF STATE]
proof (state)
this:
isCont f x
goal (2 subgoals):
1. jumpF f (at_left x) = 0
2. jumpF f (at_right x) = 0
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
isCont f x
[PROOF STEP]
show "jumpF f (at_left x) = 0" "jumpF f (at_right x) = 0"
[PROOF STATE]
proof (prove)
using this:
isCont f x
goal (1 subgoal):
1. jumpF f (at_left x) = 0 &&& jumpF f (at_right x) = 0
[PROOF STEP]
unfolding continuous_at_split
[PROOF STATE]
proof (prove)
using this:
continuous (at_left x) f \<and> continuous (at_right x) f
goal (1 subgoal):
1. jumpF f (at_left x) = 0 &&& jumpF f (at_right x) = 0
[PROOF STEP]
by (auto intro: jumpF_not_infinity)
[PROOF STATE]
proof (state)
this:
jumpF f (at_left x) = 0
jumpF f (at_right x) = 0
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
?x \<in> {0<..<1} \<Longrightarrow> jumpF f (at_left ?x) = 0
?x \<in> {0<..<1} \<Longrightarrow> jumpF f (at_right ?x) = 0
goal (1 subgoal):
1. cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z
[PROOF STEP]
have "cindex_pathE h z = cindexE 0 1 f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cindex_pathE h z = cindexE 0 1 f
[PROOF STEP]
unfolding cindex_pathE_def f_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cindexE 0 1 (\<lambda>t. Im (h t - z) / Re (h t - z)) = cindexE 0 1 (\<lambda>t. Im (h t - z) / Re (h t - z))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
cindex_pathE h z = cindexE 0 1 f
goal (1 subgoal):
1. cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
cindex_pathE h z = cindexE 0 1 f
goal (1 subgoal):
1. cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z
[PROOF STEP]
have "... = sum (\<lambda>x. jumpF f (at_right x)) Ri - sum (\<lambda>x. jumpF f (at_left x)) Le"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cindexE 0 1 f = (\<Sum>x\<in>Ri. jumpF f (at_right x)) - (\<Sum>x\<in>Le. jumpF f (at_left x))
[PROOF STEP]
unfolding cindexE_def Ri_def Le_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>x | jumpF f (at_right x) \<noteq> 0 \<and> 0 \<le> x \<and> x < 1. jumpF f (at_right x)) - (\<Sum>x | jumpF f (at_left x) \<noteq> 0 \<and> 0 < x \<and> x \<le> 1. jumpF f (at_left x)) = (\<Sum>x | jumpF f (at_right x) \<noteq> 0 \<and> 0 \<le> x \<and> x < 1. jumpF f (at_right x)) - (\<Sum>x | jumpF f (at_left x) \<noteq> 0 \<and> 0 < x \<and> x \<le> 1. jumpF f (at_left x))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
cindexE 0 1 f = (\<Sum>x\<in>Ri. jumpF f (at_right x)) - (\<Sum>x\<in>Le. jumpF f (at_left x))
goal (1 subgoal):
1. cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
cindexE 0 1 f = (\<Sum>x\<in>Ri. jumpF f (at_right x)) - (\<Sum>x\<in>Le. jumpF f (at_left x))
goal (1 subgoal):
1. cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z
[PROOF STEP]
have "... = jumpF f (at_right 0) - jumpF f (at_left 1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>x\<in>Ri. jumpF f (at_right x)) - (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_right 0) - jumpF f (at_left 1)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<Sum>x\<in>Ri. jumpF f (at_right x)) - (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_right 0) - jumpF f (at_left 1)
[PROOF STEP]
have "sum (\<lambda>x. jumpF f (at_right x)) Ri = jumpF f (at_right 0)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
[PROOF STEP]
proof (cases "jumpF f (at_right 0) = 0")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. jumpF f (at_right 0) = 0 \<Longrightarrow> (\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
2. jumpF f (at_right 0) \<noteq> 0 \<Longrightarrow> (\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
jumpF f (at_right 0) = 0
goal (2 subgoals):
1. jumpF f (at_right 0) = 0 \<Longrightarrow> (\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
2. jumpF f (at_right 0) \<noteq> 0 \<Longrightarrow> (\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
[PROOF STEP]
hence False if "x \<in> Ri" for x
[PROOF STATE]
proof (prove)
using this:
jumpF f (at_right 0) = 0
goal (1 subgoal):
1. False
[PROOF STEP]
using that
[PROOF STATE]
proof (prove)
using this:
jumpF f (at_right 0) = 0
x \<in> Ri
goal (1 subgoal):
1. False
[PROOF STEP]
by (cases "x = 0") (auto simp: jumpF_eq0 Ri_def)
[PROOF STATE]
proof (state)
this:
?x \<in> Ri \<Longrightarrow> False
goal (2 subgoals):
1. jumpF f (at_right 0) = 0 \<Longrightarrow> (\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
2. jumpF f (at_right 0) \<noteq> 0 \<Longrightarrow> (\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
[PROOF STEP]
hence "Ri = {}"
[PROOF STATE]
proof (prove)
using this:
?x \<in> Ri \<Longrightarrow> False
goal (1 subgoal):
1. Ri = {}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
Ri = {}
goal (2 subgoals):
1. jumpF f (at_right 0) = 0 \<Longrightarrow> (\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
2. jumpF f (at_right 0) \<noteq> 0 \<Longrightarrow> (\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Ri = {}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Ri = {}
goal (1 subgoal):
1. (\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
[PROOF STEP]
using True
[PROOF STATE]
proof (prove)
using this:
Ri = {}
jumpF f (at_right 0) = 0
goal (1 subgoal):
1. (\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
goal (1 subgoal):
1. jumpF f (at_right 0) \<noteq> 0 \<Longrightarrow> (\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. jumpF f (at_right 0) \<noteq> 0 \<Longrightarrow> (\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
jumpF f (at_right 0) \<noteq> 0
goal (1 subgoal):
1. jumpF f (at_right 0) \<noteq> 0 \<Longrightarrow> (\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
[PROOF STEP]
hence "x \<in> Ri \<longleftrightarrow> x = 0" for x
[PROOF STATE]
proof (prove)
using this:
jumpF f (at_right 0) \<noteq> 0
goal (1 subgoal):
1. (x \<in> Ri) = (x = 0)
[PROOF STEP]
using that
[PROOF STATE]
proof (prove)
using this:
jumpF f (at_right 0) \<noteq> 0
\<forall>t\<in>{0<..<1}. Re (h t) \<noteq> Re z
valid_path h
goal (1 subgoal):
1. (x \<in> Ri) = (x = 0)
[PROOF STEP]
by (cases "x = 0") (auto simp: jumpF_eq0 Ri_def)
[PROOF STATE]
proof (state)
this:
(?x \<in> Ri) = (?x = 0)
goal (1 subgoal):
1. jumpF f (at_right 0) \<noteq> 0 \<Longrightarrow> (\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
(?x \<in> Ri) = (?x = 0)
[PROOF STEP]
have "Ri = {0}"
[PROOF STATE]
proof (prove)
using this:
(?x \<in> Ri) = (?x = 0)
goal (1 subgoal):
1. Ri = {0}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
Ri = {0}
goal (1 subgoal):
1. jumpF f (at_right 0) \<noteq> 0 \<Longrightarrow> (\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Ri = {0}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Ri = {0}
goal (1 subgoal):
1. (\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
goal (1 subgoal):
1. (\<Sum>x\<in>Ri. jumpF f (at_right x)) - (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_right 0) - jumpF f (at_left 1)
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
(\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
goal (1 subgoal):
1. (\<Sum>x\<in>Ri. jumpF f (at_right x)) - (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_right 0) - jumpF f (at_left 1)
[PROOF STEP]
have "sum (\<lambda>x. jumpF f (at_left x)) Le = jumpF f (at_left 1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
[PROOF STEP]
proof (cases "jumpF f (at_left 1) = 0")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. jumpF f (at_left 1) = 0 \<Longrightarrow> (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
2. jumpF f (at_left 1) \<noteq> 0 \<Longrightarrow> (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
jumpF f (at_left 1) = 0
goal (2 subgoals):
1. jumpF f (at_left 1) = 0 \<Longrightarrow> (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
2. jumpF f (at_left 1) \<noteq> 0 \<Longrightarrow> (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
jumpF f (at_left 1) = 0
[PROOF STEP]
have "Le = {}"
[PROOF STATE]
proof (prove)
using this:
jumpF f (at_left 1) = 0
goal (1 subgoal):
1. Le = {}
[PROOF STEP]
unfolding Le_def
[PROOF STATE]
proof (prove)
using this:
jumpF f (at_left 1) = 0
goal (1 subgoal):
1. {x. jumpF f (at_left x) \<noteq> 0 \<and> 0 < x \<and> x \<le> 1} = {}
[PROOF STEP]
using jumpF_eq0(1) greaterThanLessThan_iff
[PROOF STATE]
proof (prove)
using this:
jumpF f (at_left 1) = 0
?x \<in> {0<..<1} \<Longrightarrow> jumpF f (at_left ?x) = 0
(?i \<in> {?l<..<?u}) = (?l < ?i \<and> ?i < ?u)
goal (1 subgoal):
1. {x. jumpF f (at_left x) \<noteq> 0 \<and> 0 < x \<and> x \<le> 1} = {}
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
Le = {}
goal (2 subgoals):
1. jumpF f (at_left 1) = 0 \<Longrightarrow> (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
2. jumpF f (at_left 1) \<noteq> 0 \<Longrightarrow> (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Le = {}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Le = {}
goal (1 subgoal):
1. (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
[PROOF STEP]
using True
[PROOF STATE]
proof (prove)
using this:
Le = {}
jumpF f (at_left 1) = 0
goal (1 subgoal):
1. (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
goal (1 subgoal):
1. jumpF f (at_left 1) \<noteq> 0 \<Longrightarrow> (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. jumpF f (at_left 1) \<noteq> 0 \<Longrightarrow> (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
jumpF f (at_left 1) \<noteq> 0
goal (1 subgoal):
1. jumpF f (at_left 1) \<noteq> 0 \<Longrightarrow> (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
jumpF f (at_left 1) \<noteq> 0
[PROOF STEP]
have "Le = {1}"
[PROOF STATE]
proof (prove)
using this:
jumpF f (at_left 1) \<noteq> 0
goal (1 subgoal):
1. Le = {1}
[PROOF STEP]
unfolding Le_def
[PROOF STATE]
proof (prove)
using this:
jumpF f (at_left 1) \<noteq> 0
goal (1 subgoal):
1. {x. jumpF f (at_left x) \<noteq> 0 \<and> 0 < x \<and> x \<le> 1} = {1}
[PROOF STEP]
using jumpF_eq0(1) greaterThanLessThan_iff
[PROOF STATE]
proof (prove)
using this:
jumpF f (at_left 1) \<noteq> 0
?x \<in> {0<..<1} \<Longrightarrow> jumpF f (at_left ?x) = 0
(?i \<in> {?l<..<?u}) = (?l < ?i \<and> ?i < ?u)
goal (1 subgoal):
1. {x. jumpF f (at_left x) \<noteq> 0 \<and> 0 < x \<and> x \<le> 1} = {1}
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
Le = {1}
goal (1 subgoal):
1. jumpF f (at_left 1) \<noteq> 0 \<Longrightarrow> (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Le = {1}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Le = {1}
goal (1 subgoal):
1. (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
goal (1 subgoal):
1. (\<Sum>x\<in>Ri. jumpF f (at_right x)) - (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_right 0) - jumpF f (at_left 1)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
(\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
(\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(\<Sum>x\<in>Ri. jumpF f (at_right x)) = jumpF f (at_right 0)
(\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_left 1)
goal (1 subgoal):
1. (\<Sum>x\<in>Ri. jumpF f (at_right x)) - (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_right 0) - jumpF f (at_left 1)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(\<Sum>x\<in>Ri. jumpF f (at_right x)) - (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_right 0) - jumpF f (at_left 1)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(\<Sum>x\<in>Ri. jumpF f (at_right x)) - (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_right 0) - jumpF f (at_left 1)
goal (1 subgoal):
1. cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<Sum>x\<in>Ri. jumpF f (at_right x)) - (\<Sum>x\<in>Le. jumpF f (at_left x)) = jumpF f (at_right 0) - jumpF f (at_left 1)
goal (1 subgoal):
1. cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z
[PROOF STEP]
have "... = jumpF_pathstart h z - jumpF_pathfinish h z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF f (at_right 0) - jumpF f (at_left 1) = jumpF_pathstart h z - jumpF_pathfinish h z
[PROOF STEP]
unfolding jumpF_pathstart_def jumpF_pathfinish_def f_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF (\<lambda>t. Im (h t - z) / Re (h t - z)) (at_right 0) - jumpF (\<lambda>t. Im (h t - z) / Re (h t - z)) (at_left 1) = jumpF (\<lambda>t. Im (h t - z) / Re (h t - z)) (at_right 0) - jumpF (\<lambda>t. Im (h t - z) / Re (h t - z)) (at_left 1)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
jumpF f (at_right 0) - jumpF f (at_left 1) = jumpF_pathstart h z - jumpF_pathfinish h z
goal (1 subgoal):
1. cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z
goal (1 subgoal):
1. cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
cindex_pathE h z = jumpF_pathstart h z - jumpF_pathfinish h z
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<lbrakk>\<forall>t\<in>{0<..<1}. Re (?h t) \<noteq> Re z; valid_path ?h\<rbrakk> \<Longrightarrow> cindex_pathE ?h z = jumpF_pathstart ?h z - jumpF_pathfinish ?h z
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
define fI where "fI=(\<lambda>t. Im (g t - z))"
[PROOF STATE]
proof (state)
this:
fI = (\<lambda>t. Im (g t - z))
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
define fR where "fR=(\<lambda>t. Re (g t - z))"
[PROOF STATE]
proof (state)
this:
fR = (\<lambda>t. Re (g t - z))
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
have fI: "(fI \<longlongrightarrow> fI 0) (at_right 0)" "(fI \<longlongrightarrow> fI 1) (at_left 1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (fI \<longlongrightarrow> fI 0) (at_right 0) &&& (fI \<longlongrightarrow> fI 1) (at_left 1)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. (fI \<longlongrightarrow> fI 0) (at_right 0)
2. (fI \<longlongrightarrow> fI 1) (at_left 1)
[PROOF STEP]
have "continuous (at_right 0) fI"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous (at_right 0) fI
[PROOF STEP]
apply (rule continuous_on_at_right[of _ 1])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. continuous_on {0..1} fI
2. 0 < 1
[PROOF STEP]
using \<open>path g\<close>
[PROOF STATE]
proof (prove)
using this:
path g
goal (2 subgoals):
1. continuous_on {0..1} fI
2. 0 < 1
[PROOF STEP]
unfolding fI_def path_def
[PROOF STATE]
proof (prove)
using this:
continuous_on {0..1} g
goal (2 subgoals):
1. continuous_on {0..1} (\<lambda>t. Im (g t - z))
2. 0 < 1
[PROOF STEP]
by (auto intro:continuous_intros)
[PROOF STATE]
proof (state)
this:
continuous (at_right 0) fI
goal (2 subgoals):
1. (fI \<longlongrightarrow> fI 0) (at_right 0)
2. (fI \<longlongrightarrow> fI 1) (at_left 1)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
continuous (at_right 0) fI
[PROOF STEP]
show "(fI \<longlongrightarrow> fI 0) (at_right 0)"
[PROOF STATE]
proof (prove)
using this:
continuous (at_right 0) fI
goal (1 subgoal):
1. (fI \<longlongrightarrow> fI 0) (at_right 0)
[PROOF STEP]
by (simp add: continuous_within)
[PROOF STATE]
proof (state)
this:
(fI \<longlongrightarrow> fI 0) (at_right 0)
goal (1 subgoal):
1. (fI \<longlongrightarrow> fI 1) (at_left 1)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (fI \<longlongrightarrow> fI 1) (at_left 1)
[PROOF STEP]
have "continuous (at_left 1) fI"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous (at_left 1) fI
[PROOF STEP]
apply (rule continuous_on_at_left[of 0])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. continuous_on {0..1} fI
2. 0 < 1
[PROOF STEP]
using \<open>path g\<close>
[PROOF STATE]
proof (prove)
using this:
path g
goal (2 subgoals):
1. continuous_on {0..1} fI
2. 0 < 1
[PROOF STEP]
unfolding fI_def path_def
[PROOF STATE]
proof (prove)
using this:
continuous_on {0..1} g
goal (2 subgoals):
1. continuous_on {0..1} (\<lambda>t. Im (g t - z))
2. 0 < 1
[PROOF STEP]
by (auto intro:continuous_intros)
[PROOF STATE]
proof (state)
this:
continuous (at_left 1) fI
goal (1 subgoal):
1. (fI \<longlongrightarrow> fI 1) (at_left 1)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
continuous (at_left 1) fI
[PROOF STEP]
show "(fI \<longlongrightarrow> fI 1) (at_left 1)"
[PROOF STATE]
proof (prove)
using this:
continuous (at_left 1) fI
goal (1 subgoal):
1. (fI \<longlongrightarrow> fI 1) (at_left 1)
[PROOF STEP]
by (simp add: continuous_within)
[PROOF STATE]
proof (state)
this:
(fI \<longlongrightarrow> fI 1) (at_left 1)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(fI \<longlongrightarrow> fI 0) (at_right 0)
(fI \<longlongrightarrow> fI 1) (at_left 1)
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
have fR: "(fR \<longlongrightarrow> 0) (at_right 0)" "(fR \<longlongrightarrow> 0) (at_left 1)" when "Re (g 0) = Re z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (fR \<longlongrightarrow> 0) (at_right 0) &&& (fR \<longlongrightarrow> 0) (at_left 1)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. (fR \<longlongrightarrow> 0) (at_right 0)
2. (fR \<longlongrightarrow> 0) (at_left 1)
[PROOF STEP]
have "continuous (at_right 0) fR"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous (at_right 0) fR
[PROOF STEP]
apply (rule continuous_on_at_right[of _ 1])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. continuous_on {0..1} fR
2. 0 < 1
[PROOF STEP]
using \<open>path g\<close>
[PROOF STATE]
proof (prove)
using this:
path g
goal (2 subgoals):
1. continuous_on {0..1} fR
2. 0 < 1
[PROOF STEP]
unfolding fR_def path_def
[PROOF STATE]
proof (prove)
using this:
continuous_on {0..1} g
goal (2 subgoals):
1. continuous_on {0..1} (\<lambda>t. Re (g t - z))
2. 0 < 1
[PROOF STEP]
by (auto intro:continuous_intros)
[PROOF STATE]
proof (state)
this:
continuous (at_right 0) fR
goal (2 subgoals):
1. (fR \<longlongrightarrow> 0) (at_right 0)
2. (fR \<longlongrightarrow> 0) (at_left 1)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
continuous (at_right 0) fR
[PROOF STEP]
show "(fR \<longlongrightarrow> 0) (at_right 0)"
[PROOF STATE]
proof (prove)
using this:
continuous (at_right 0) fR
goal (1 subgoal):
1. (fR \<longlongrightarrow> 0) (at_right 0)
[PROOF STEP]
using that
[PROOF STATE]
proof (prove)
using this:
continuous (at_right 0) fR
Re (g 0) = Re z
goal (1 subgoal):
1. (fR \<longlongrightarrow> 0) (at_right 0)
[PROOF STEP]
unfolding fR_def
[PROOF STATE]
proof (prove)
using this:
continuous (at_right 0) (\<lambda>t. Re (g t - z))
Re (g 0) = Re z
goal (1 subgoal):
1. ((\<lambda>t. Re (g t - z)) \<longlongrightarrow> 0) (at_right 0)
[PROOF STEP]
by (simp add: continuous_within)
[PROOF STATE]
proof (state)
this:
(fR \<longlongrightarrow> 0) (at_right 0)
goal (1 subgoal):
1. (fR \<longlongrightarrow> 0) (at_left 1)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (fR \<longlongrightarrow> 0) (at_left 1)
[PROOF STEP]
have "continuous (at_left 1) fR"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous (at_left 1) fR
[PROOF STEP]
apply (rule continuous_on_at_left[of 0])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. continuous_on {0..1} fR
2. 0 < 1
[PROOF STEP]
using \<open>path g\<close>
[PROOF STATE]
proof (prove)
using this:
path g
goal (2 subgoals):
1. continuous_on {0..1} fR
2. 0 < 1
[PROOF STEP]
unfolding fR_def path_def
[PROOF STATE]
proof (prove)
using this:
continuous_on {0..1} g
goal (2 subgoals):
1. continuous_on {0..1} (\<lambda>t. Re (g t - z))
2. 0 < 1
[PROOF STEP]
by (auto intro:continuous_intros)
[PROOF STATE]
proof (state)
this:
continuous (at_left 1) fR
goal (1 subgoal):
1. (fR \<longlongrightarrow> 0) (at_left 1)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
continuous (at_left 1) fR
[PROOF STEP]
show "(fR \<longlongrightarrow> 0) (at_left 1)"
[PROOF STATE]
proof (prove)
using this:
continuous (at_left 1) fR
goal (1 subgoal):
1. (fR \<longlongrightarrow> 0) (at_left 1)
[PROOF STEP]
using that loop
[PROOF STATE]
proof (prove)
using this:
continuous (at_left 1) fR
Re (g 0) = Re z
pathfinish g = pathstart g
goal (1 subgoal):
1. (fR \<longlongrightarrow> 0) (at_left 1)
[PROOF STEP]
unfolding fR_def path_defs
[PROOF STATE]
proof (prove)
using this:
continuous (at_left 1) (\<lambda>t. Re (g t - z))
Re (g 0) = Re z
g 1 = g 0
goal (1 subgoal):
1. ((\<lambda>t. Re (g t - z)) \<longlongrightarrow> 0) (at_left 1)
[PROOF STEP]
by (simp add: continuous_within)
[PROOF STATE]
proof (state)
this:
(fR \<longlongrightarrow> 0) (at_left 1)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
Re (g 0) = Re z \<Longrightarrow> (fR \<longlongrightarrow> 0) (at_right 0)
Re (g 0) = Re z \<Longrightarrow> (fR \<longlongrightarrow> 0) (at_left 1)
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
have "(\<forall>t\<in>{0<..<1}. Re (g t) > Re z) \<or> (\<forall>t\<in>{0<..<1}. Re (g t) < Re z)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<forall>t\<in>{0<..<1}. Re z < Re (g t)) \<or> (\<forall>t\<in>{0<..<1}. Re (g t) < Re z)
[PROOF STEP]
proof (rule ccontr)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> ((\<forall>t\<in>{0<..<1}. Re z < Re (g t)) \<or> (\<forall>t\<in>{0<..<1}. Re (g t) < Re z)) \<Longrightarrow> False
[PROOF STEP]
assume " \<not> ((\<forall>t\<in>{0<..<1}. Re z < Re (g t)) \<or> (\<forall>t\<in>{0<..<1}. Re (g t) < Re z))"
[PROOF STATE]
proof (state)
this:
\<not> ((\<forall>t\<in>{0<..<1}. Re z < Re (g t)) \<or> (\<forall>t\<in>{0<..<1}. Re (g t) < Re z))
goal (1 subgoal):
1. \<not> ((\<forall>t\<in>{0<..<1}. Re z < Re (g t)) \<or> (\<forall>t\<in>{0<..<1}. Re (g t) < Re z)) \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<not> ((\<forall>t\<in>{0<..<1}. Re z < Re (g t)) \<or> (\<forall>t\<in>{0<..<1}. Re (g t) < Re z))
[PROOF STEP]
obtain t1 t2 where t:"t1\<in>{0<..<1}" "t2\<in>{0<..<1}" "Re (g t1)\<le>Re z" "Re (g t2)\<ge>Re z"
[PROOF STATE]
proof (prove)
using this:
\<not> ((\<forall>t\<in>{0<..<1}. Re z < Re (g t)) \<or> (\<forall>t\<in>{0<..<1}. Re (g t) < Re z))
goal (1 subgoal):
1. (\<And>t1 t2. \<lbrakk>t1 \<in> {0<..<1}; t2 \<in> {0<..<1}; Re (g t1) \<le> Re z; Re z \<le> Re (g t2)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding path_image_def
[PROOF STATE]
proof (prove)
using this:
\<not> ((\<forall>t\<in>{0<..<1}. Re z < Re (g t)) \<or> (\<forall>t\<in>{0<..<1}. Re (g t) < Re z))
goal (1 subgoal):
1. (\<And>t1 t2. \<lbrakk>t1 \<in> {0<..<1}; t2 \<in> {0<..<1}; Re (g t1) \<le> Re z; Re z \<le> Re (g t2)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
t1 \<in> {0<..<1}
t2 \<in> {0<..<1}
Re (g t1) \<le> Re z
Re z \<le> Re (g t2)
goal (1 subgoal):
1. \<not> ((\<forall>t\<in>{0<..<1}. Re z < Re (g t)) \<or> (\<forall>t\<in>{0<..<1}. Re (g t) < Re z)) \<Longrightarrow> False
[PROOF STEP]
have False when "t1\<le>t2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. False
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. False
[PROOF STEP]
have "continuous_on {t1..t2} (\<lambda>t. Re (g t))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_on {t1..t2} (\<lambda>t. Re (g t))
[PROOF STEP]
using valid_path_imp_path[OF \<open>valid_path g\<close>] t
[PROOF STATE]
proof (prove)
using this:
path g
t1 \<in> {0<..<1}
t2 \<in> {0<..<1}
Re (g t1) \<le> Re z
Re z \<le> Re (g t2)
goal (1 subgoal):
1. continuous_on {t1..t2} (\<lambda>t. Re (g t))
[PROOF STEP]
unfolding path_def
[PROOF STATE]
proof (prove)
using this:
continuous_on {0..1} g
t1 \<in> {0<..<1}
t2 \<in> {0<..<1}
Re (g t1) \<le> Re z
Re z \<le> Re (g t2)
goal (1 subgoal):
1. continuous_on {t1..t2} (\<lambda>t. Re (g t))
[PROOF STEP]
by (metis (full_types) atLeastatMost_subset_iff continuous_on_Re continuous_on_subset
eucl_less_le_not_le greaterThanLessThan_iff)
[PROOF STATE]
proof (state)
this:
continuous_on {t1..t2} (\<lambda>t. Re (g t))
goal (1 subgoal):
1. False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
continuous_on {t1..t2} (\<lambda>t. Re (g t))
[PROOF STEP]
obtain t' where t':"t'\<ge>t1" "t'\<le>t2" "Re (g t') = Re z"
[PROOF STATE]
proof (prove)
using this:
continuous_on {t1..t2} (\<lambda>t. Re (g t))
goal (1 subgoal):
1. (\<And>t'. \<lbrakk>t1 \<le> t'; t' \<le> t2; Re (g t') = Re z\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using IVT'[of "\<lambda>t. Re (g t)" t1 _ t2] t \<open>t1\<le>t2\<close>
[PROOF STATE]
proof (prove)
using this:
continuous_on {t1..t2} (\<lambda>t. Re (g t))
\<lbrakk>Re (g t1) \<le> ?y; ?y \<le> Re (g t2); t1 \<le> t2; continuous_on {t1..t2} (\<lambda>t. Re (g t))\<rbrakk> \<Longrightarrow> \<exists>x\<ge>t1. x \<le> t2 \<and> Re (g x) = ?y
t1 \<in> {0<..<1}
t2 \<in> {0<..<1}
Re (g t1) \<le> Re z
Re z \<le> Re (g t2)
t1 \<le> t2
goal (1 subgoal):
1. (\<And>t'. \<lbrakk>t1 \<le> t'; t' \<le> t2; Re (g t') = Re z\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
t1 \<le> t'
t' \<le> t2
Re (g t') = Re z
goal (1 subgoal):
1. False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
t1 \<le> t'
t' \<le> t2
Re (g t') = Re z
[PROOF STEP]
have "t'\<in>{0<..<1}"
[PROOF STATE]
proof (prove)
using this:
t1 \<le> t'
t' \<le> t2
Re (g t') = Re z
goal (1 subgoal):
1. t' \<in> {0<..<1}
[PROOF STEP]
using t
[PROOF STATE]
proof (prove)
using this:
t1 \<le> t'
t' \<le> t2
Re (g t') = Re z
t1 \<in> {0<..<1}
t2 \<in> {0<..<1}
Re (g t1) \<le> Re z
Re z \<le> Re (g t2)
goal (1 subgoal):
1. t' \<in> {0<..<1}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
t' \<in> {0<..<1}
goal (1 subgoal):
1. False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
t' \<in> {0<..<1}
[PROOF STEP]
have "Re (g t') \<noteq> Re z"
[PROOF STATE]
proof (prove)
using this:
t' \<in> {0<..<1}
goal (1 subgoal):
1. Re (g t') \<noteq> Re z
[PROOF STEP]
using Re_neq \<open>s=0\<close>
[PROOF STATE]
proof (prove)
using this:
t' \<in> {0<..<1}
\<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z
s = 0
goal (1 subgoal):
1. Re (g t') \<noteq> Re z
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Re (g t') \<noteq> Re z
goal (1 subgoal):
1. False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Re (g t') \<noteq> Re z
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
using this:
Re (g t') \<noteq> Re z
goal (1 subgoal):
1. False
[PROOF STEP]
using \<open>Re (g t') = Re z\<close>
[PROOF STATE]
proof (prove)
using this:
Re (g t') \<noteq> Re z
Re (g t') = Re z
goal (1 subgoal):
1. False
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
t1 \<le> t2 \<Longrightarrow> False
goal (1 subgoal):
1. \<not> ((\<forall>t\<in>{0<..<1}. Re z < Re (g t)) \<or> (\<forall>t\<in>{0<..<1}. Re (g t) < Re z)) \<Longrightarrow> False
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
t1 \<le> t2 \<Longrightarrow> False
goal (1 subgoal):
1. \<not> ((\<forall>t\<in>{0<..<1}. Re z < Re (g t)) \<or> (\<forall>t\<in>{0<..<1}. Re (g t) < Re z)) \<Longrightarrow> False
[PROOF STEP]
have False when "t1\<ge>t2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. False
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. False
[PROOF STEP]
have "continuous_on {t2..t1} (\<lambda>t. Re (g t))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_on {t2..t1} (\<lambda>t. Re (g t))
[PROOF STEP]
using valid_path_imp_path[OF \<open>valid_path g\<close>] t
[PROOF STATE]
proof (prove)
using this:
path g
t1 \<in> {0<..<1}
t2 \<in> {0<..<1}
Re (g t1) \<le> Re z
Re z \<le> Re (g t2)
goal (1 subgoal):
1. continuous_on {t2..t1} (\<lambda>t. Re (g t))
[PROOF STEP]
unfolding path_def
[PROOF STATE]
proof (prove)
using this:
continuous_on {0..1} g
t1 \<in> {0<..<1}
t2 \<in> {0<..<1}
Re (g t1) \<le> Re z
Re z \<le> Re (g t2)
goal (1 subgoal):
1. continuous_on {t2..t1} (\<lambda>t. Re (g t))
[PROOF STEP]
by (metis (full_types) atLeastatMost_subset_iff continuous_on_Re continuous_on_subset
eucl_less_le_not_le greaterThanLessThan_iff)
[PROOF STATE]
proof (state)
this:
continuous_on {t2..t1} (\<lambda>t. Re (g t))
goal (1 subgoal):
1. False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
continuous_on {t2..t1} (\<lambda>t. Re (g t))
[PROOF STEP]
obtain t' where t':"t'\<le>t1" "t'\<ge>t2" "Re (g t') = Re z"
[PROOF STATE]
proof (prove)
using this:
continuous_on {t2..t1} (\<lambda>t. Re (g t))
goal (1 subgoal):
1. (\<And>t'. \<lbrakk>t' \<le> t1; t2 \<le> t'; Re (g t') = Re z\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using IVT2'[of "\<lambda>t. Re (g t)" t1 _ t2] t \<open>t1\<ge>t2\<close>
[PROOF STATE]
proof (prove)
using this:
continuous_on {t2..t1} (\<lambda>t. Re (g t))
\<lbrakk>Re (g t1) \<le> ?y; ?y \<le> Re (g t2); t2 \<le> t1; continuous_on {t2..t1} (\<lambda>t. Re (g t))\<rbrakk> \<Longrightarrow> \<exists>x\<ge>t2. x \<le> t1 \<and> Re (g x) = ?y
t1 \<in> {0<..<1}
t2 \<in> {0<..<1}
Re (g t1) \<le> Re z
Re z \<le> Re (g t2)
t2 \<le> t1
goal (1 subgoal):
1. (\<And>t'. \<lbrakk>t' \<le> t1; t2 \<le> t'; Re (g t') = Re z\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
t' \<le> t1
t2 \<le> t'
Re (g t') = Re z
goal (1 subgoal):
1. False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
t' \<le> t1
t2 \<le> t'
Re (g t') = Re z
[PROOF STEP]
have "t'\<in>{0<..<1}"
[PROOF STATE]
proof (prove)
using this:
t' \<le> t1
t2 \<le> t'
Re (g t') = Re z
goal (1 subgoal):
1. t' \<in> {0<..<1}
[PROOF STEP]
using t
[PROOF STATE]
proof (prove)
using this:
t' \<le> t1
t2 \<le> t'
Re (g t') = Re z
t1 \<in> {0<..<1}
t2 \<in> {0<..<1}
Re (g t1) \<le> Re z
Re z \<le> Re (g t2)
goal (1 subgoal):
1. t' \<in> {0<..<1}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
t' \<in> {0<..<1}
goal (1 subgoal):
1. False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
t' \<in> {0<..<1}
[PROOF STEP]
have "Re (g t') \<noteq> Re z"
[PROOF STATE]
proof (prove)
using this:
t' \<in> {0<..<1}
goal (1 subgoal):
1. Re (g t') \<noteq> Re z
[PROOF STEP]
using Re_neq \<open>s=0\<close>
[PROOF STATE]
proof (prove)
using this:
t' \<in> {0<..<1}
\<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z
s = 0
goal (1 subgoal):
1. Re (g t') \<noteq> Re z
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Re (g t') \<noteq> Re z
goal (1 subgoal):
1. False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Re (g t') \<noteq> Re z
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
using this:
Re (g t') \<noteq> Re z
goal (1 subgoal):
1. False
[PROOF STEP]
using \<open>Re (g t') = Re z\<close>
[PROOF STATE]
proof (prove)
using this:
Re (g t') \<noteq> Re z
Re (g t') = Re z
goal (1 subgoal):
1. False
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
t2 \<le> t1 \<Longrightarrow> False
goal (1 subgoal):
1. \<not> ((\<forall>t\<in>{0<..<1}. Re z < Re (g t)) \<or> (\<forall>t\<in>{0<..<1}. Re (g t) < Re z)) \<Longrightarrow> False
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
t1 \<le> t2 \<Longrightarrow> False
t2 \<le> t1 \<Longrightarrow> False
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
using this:
t1 \<le> t2 \<Longrightarrow> False
t2 \<le> t1 \<Longrightarrow> False
goal (1 subgoal):
1. False
[PROOF STEP]
by linarith
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(\<forall>t\<in>{0<..<1}. Re z < Re (g t)) \<or> (\<forall>t\<in>{0<..<1}. Re (g t) < Re z)
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
(\<forall>t\<in>{0<..<1}. Re z < Re (g t)) \<or> (\<forall>t\<in>{0<..<1}. Re (g t) < Re z)
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
have ?thesis when Re_pos:"\<forall>t\<in>{0<..<1}. Re (g t) > Re z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
have "Re (winding_number g z) = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Re (winding_number g z) = 0
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Re (winding_number g z) = 0
[PROOF STEP]
have "\<forall>p\<in>path_image g. Re z \<le> Re p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>p\<in>path_image g. Re z \<le> Re p
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>p. p \<in> path_image g \<Longrightarrow> Re z \<le> Re p
[PROOF STEP]
fix p
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>p. p \<in> path_image g \<Longrightarrow> Re z \<le> Re p
[PROOF STEP]
assume "p \<in> path_image g"
[PROOF STATE]
proof (state)
this:
p \<in> path_image g
goal (1 subgoal):
1. \<And>p. p \<in> path_image g \<Longrightarrow> Re z \<le> Re p
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
p \<in> path_image g
[PROOF STEP]
obtain t where "0\<le>t" "t\<le>1" "p = g t"
[PROOF STATE]
proof (prove)
using this:
p \<in> path_image g
goal (1 subgoal):
1. (\<And>t. \<lbrakk>0 \<le> t; t \<le> 1; p = g t\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding path_image_def
[PROOF STATE]
proof (prove)
using this:
p \<in> g ` {0..1}
goal (1 subgoal):
1. (\<And>t. \<lbrakk>0 \<le> t; t \<le> 1; p = g t\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 \<le> t
t \<le> 1
p = g t
goal (1 subgoal):
1. \<And>p. p \<in> path_image g \<Longrightarrow> Re z \<le> Re p
[PROOF STEP]
have "Re z \<le> Re (g t)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Re z \<le> Re (g t)
[PROOF STEP]
apply (rule continuous_ge_on_closure[of "{0<..<1}" "\<lambda>t. Re (g t)" t "Re z",simplified])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. continuous_on {0..1} (\<lambda>x. Re (g x))
2. 0 \<le> t \<and> t \<le> 1
3. \<And>x. 0 < x \<and> x < 1 \<Longrightarrow> Re z \<le> Re (g x)
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_on {0..1} (\<lambda>x. Re (g x))
[PROOF STEP]
using valid_path_imp_path[OF \<open>valid_path g\<close>,unfolded path_def]
[PROOF STATE]
proof (prove)
using this:
continuous_on {0..1} g
goal (1 subgoal):
1. continuous_on {0..1} (\<lambda>x. Re (g x))
[PROOF STEP]
by (auto intro:continuous_intros)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. 0 \<le> t \<and> t \<le> 1
2. \<And>x. 0 < x \<and> x < 1 \<Longrightarrow> Re z \<le> Re (g x)
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<le> t \<and> t \<le> 1
[PROOF STEP]
using \<open>0\<le>t\<close> \<open>t\<le>1\<close>
[PROOF STATE]
proof (prove)
using this:
0 \<le> t
t \<le> 1
goal (1 subgoal):
1. 0 \<le> t \<and> t \<le> 1
[PROOF STEP]
by auto
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. 0 < x \<and> x < 1 \<Longrightarrow> Re z \<le> Re (g x)
[PROOF STEP]
subgoal for x
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < x \<and> x < 1 \<Longrightarrow> Re z \<le> Re (g x)
[PROOF STEP]
using that[rule_format,of x]
[PROOF STATE]
proof (prove)
using this:
x \<in> {0<..<1} \<Longrightarrow> Re z < Re (g x)
goal (1 subgoal):
1. 0 < x \<and> x < 1 \<Longrightarrow> Re z \<le> Re (g x)
[PROOF STEP]
by auto
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
Re z \<le> Re (g t)
goal (1 subgoal):
1. \<And>p. p \<in> path_image g \<Longrightarrow> Re z \<le> Re p
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Re z \<le> Re (g t)
[PROOF STEP]
show "Re z \<le> Re p"
[PROOF STATE]
proof (prove)
using this:
Re z \<le> Re (g t)
goal (1 subgoal):
1. Re z \<le> Re p
[PROOF STEP]
using \<open>p = g t\<close>
[PROOF STATE]
proof (prove)
using this:
Re z \<le> Re (g t)
p = g t
goal (1 subgoal):
1. Re z \<le> Re p
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Re z \<le> Re p
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<forall>p\<in>path_image g. Re z \<le> Re p
goal (1 subgoal):
1. Re (winding_number g z) = 0
[PROOF STEP]
from Re_winding_number_half_right[OF this \<open>valid_path g\<close> \<open>z\<notin>path_image g\<close>] loop
[PROOF STATE]
proof (chain)
picking this:
Re (winding_number g z) = (Im (Ln (pathfinish g - z)) - Im (Ln (pathstart g - z))) / (2 * pi)
pathfinish g = pathstart g
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Re (winding_number g z) = (Im (Ln (pathfinish g - z)) - Im (Ln (pathstart g - z))) / (2 * pi)
pathfinish g = pathstart g
goal (1 subgoal):
1. Re (winding_number g z) = 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Re (winding_number g z) = 0
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
Re (winding_number g z) = 0
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
Re (winding_number g z) = 0
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
have "cindex_pathE g z = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cindex_pathE g z = 0
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. cindex_pathE g z = 0
[PROOF STEP]
have "cindex_pathE g z = jumpF_pathstart g z - jumpF_pathfinish g z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cindex_pathE g z = jumpF_pathstart g z - jumpF_pathfinish g z
[PROOF STEP]
using index_ends[OF _ \<open>valid_path g\<close>] Re_neq \<open>s=0\<close>
[PROOF STATE]
proof (prove)
using this:
\<forall>t\<in>{0<..<1}. Re (g t) \<noteq> Re z \<Longrightarrow> cindex_pathE g z = jumpF_pathstart g z - jumpF_pathfinish g z
\<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z
s = 0
goal (1 subgoal):
1. cindex_pathE g z = jumpF_pathstart g z - jumpF_pathfinish g z
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
cindex_pathE g z = jumpF_pathstart g z - jumpF_pathfinish g z
goal (1 subgoal):
1. cindex_pathE g z = 0
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
cindex_pathE g z = jumpF_pathstart g z - jumpF_pathfinish g z
goal (1 subgoal):
1. cindex_pathE g z = 0
[PROOF STEP]
have "jumpF_pathstart g z = jumpF_pathfinish g z" when "Re (g 0) \<noteq> Re z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have "jumpF_pathstart g z = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathstart g z = 0
[PROOF STEP]
using jumpF_pathstart_eq_0[OF \<open>path g\<close>] that
[PROOF STATE]
proof (prove)
using this:
Re (pathstart g) \<noteq> Re ?z \<Longrightarrow> jumpF_pathstart g ?z = 0
Re (g 0) \<noteq> Re z
goal (1 subgoal):
1. jumpF_pathstart g z = 0
[PROOF STEP]
unfolding path_defs
[PROOF STATE]
proof (prove)
using this:
Re (g 0) \<noteq> Re ?z \<Longrightarrow> jumpF_pathstart g ?z = 0
Re (g 0) \<noteq> Re z
goal (1 subgoal):
1. jumpF_pathstart g z = 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = 0
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = 0
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have "jumpF_pathfinish g z=0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathfinish g z = 0
[PROOF STEP]
using jumpF_pathfinish_eq_0[OF \<open>path g\<close>] that loop
[PROOF STATE]
proof (prove)
using this:
Re (pathfinish g) \<noteq> Re ?z \<Longrightarrow> jumpF_pathfinish g ?z = 0
Re (g 0) \<noteq> Re z
pathfinish g = pathstart g
goal (1 subgoal):
1. jumpF_pathfinish g z = 0
[PROOF STEP]
unfolding path_defs
[PROOF STATE]
proof (prove)
using this:
Re (g 1) \<noteq> Re ?z \<Longrightarrow> jumpF_pathfinish g ?z = 0
Re (g 0) \<noteq> Re z
g 1 = g 0
goal (1 subgoal):
1. jumpF_pathfinish g z = 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
jumpF_pathfinish g z = 0
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
jumpF_pathstart g z = 0
jumpF_pathfinish g z = 0
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
jumpF_pathstart g z = 0
jumpF_pathfinish g z = 0
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = jumpF_pathfinish g z
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
Re (g 0) \<noteq> Re z \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
goal (1 subgoal):
1. cindex_pathE g z = 0
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
Re (g 0) \<noteq> Re z \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
goal (1 subgoal):
1. cindex_pathE g z = 0
[PROOF STEP]
have "jumpF_pathstart g z = jumpF_pathfinish g z" when "Re (g 0) = Re z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have [simp]:"(fR has_sgnx 1) (at_right 0)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (fR has_sgnx 1) (at_right 0)
[PROOF STEP]
unfolding fR_def has_sgnx_def eventually_at_right
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>b>0. \<forall>y>0. y < b \<longrightarrow> sgn (Re (g y - z)) = 1
[PROOF STEP]
apply (rule exI[where x=1])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < 1 \<and> (\<forall>y>0. y < 1 \<longrightarrow> sgn (Re (g y - z)) = 1)
[PROOF STEP]
using Re_pos
[PROOF STATE]
proof (prove)
using this:
\<forall>t\<in>{0<..<1}. Re z < Re (g t)
goal (1 subgoal):
1. 0 < 1 \<and> (\<forall>y>0. y < 1 \<longrightarrow> sgn (Re (g y - z)) = 1)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(fR has_sgnx 1) (at_right 0)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have [simp]:"(fR has_sgnx 1) (at_left 1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (fR has_sgnx 1) (at_left 1)
[PROOF STEP]
unfolding fR_def has_sgnx_def eventually_at_left
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>b<1. \<forall>y>b. y < 1 \<longrightarrow> sgn (Re (g y - z)) = 1
[PROOF STEP]
apply (rule exI[where x=0])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < 1 \<and> (\<forall>y>0. y < 1 \<longrightarrow> sgn (Re (g y - z)) = 1)
[PROOF STEP]
using Re_pos
[PROOF STATE]
proof (prove)
using this:
\<forall>t\<in>{0<..<1}. Re z < Re (g t)
goal (1 subgoal):
1. 0 < 1 \<and> (\<forall>y>0. y < 1 \<longrightarrow> sgn (Re (g y - z)) = 1)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(fR has_sgnx 1) (at_left 1)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have "fI 0\<noteq>0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fI 0 \<noteq> 0
[PROOF STEP]
proof (rule ccontr)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> fI 0 \<noteq> 0 \<Longrightarrow> False
[PROOF STEP]
assume "\<not> fI 0 \<noteq> 0"
[PROOF STATE]
proof (state)
this:
\<not> fI 0 \<noteq> 0
goal (1 subgoal):
1. \<not> fI 0 \<noteq> 0 \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<not> fI 0 \<noteq> 0
[PROOF STEP]
have "g 0 =z"
[PROOF STATE]
proof (prove)
using this:
\<not> fI 0 \<noteq> 0
goal (1 subgoal):
1. g 0 = z
[PROOF STEP]
using \<open>Re (g 0) = Re z\<close>
[PROOF STATE]
proof (prove)
using this:
\<not> fI 0 \<noteq> 0
Re (g 0) = Re z
goal (1 subgoal):
1. g 0 = z
[PROOF STEP]
unfolding fI_def
[PROOF STATE]
proof (prove)
using this:
\<not> Im (g 0 - z) \<noteq> 0
Re (g 0) = Re z
goal (1 subgoal):
1. g 0 = z
[PROOF STEP]
by (simp add: complex.expand)
[PROOF STATE]
proof (state)
this:
g 0 = z
goal (1 subgoal):
1. \<not> fI 0 \<noteq> 0 \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
g 0 = z
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
using this:
g 0 = z
goal (1 subgoal):
1. False
[PROOF STEP]
using \<open>z \<notin> path_image g\<close>
[PROOF STATE]
proof (prove)
using this:
g 0 = z
z \<notin> path_image g
goal (1 subgoal):
1. False
[PROOF STEP]
unfolding path_image_def
[PROOF STATE]
proof (prove)
using this:
g 0 = z
z \<notin> g ` {0..1}
goal (1 subgoal):
1. False
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
fI 0 \<noteq> 0
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
fI 0 \<noteq> 0
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have ?thesis when "fI 0>0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have "jumpF_pathstart g z = 1/2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathstart g z = 1 / 2
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. jumpF_pathstart g z = 1 / 2
[PROOF STEP]
have "(LIM x at_right 0. fI x / fR x :> at_top)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. LIM x at_right 0. fI x / fR x :> at_top
[PROOF STEP]
apply (subst filterlim_divide_at_bot_at_top_iff[of _ "fI 0"])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. (fI \<longlongrightarrow> fI 0) (at_right 0)
2. fI 0 \<noteq> 0
3. (fR \<longlongrightarrow> 0) (at_right 0) \<and> (fR has_sgnx sgn (fI 0)) (at_right 0)
[PROOF STEP]
using that fI fR[OF \<open>Re (g 0) = Re z\<close>]
[PROOF STATE]
proof (prove)
using this:
0 < fI 0
(fI \<longlongrightarrow> fI 0) (at_right 0)
(fI \<longlongrightarrow> fI 1) (at_left 1)
(fR \<longlongrightarrow> 0) (at_right 0)
(fR \<longlongrightarrow> 0) (at_left 1)
goal (3 subgoals):
1. (fI \<longlongrightarrow> fI 0) (at_right 0)
2. fI 0 \<noteq> 0
3. (fR \<longlongrightarrow> 0) (at_right 0) \<and> (fR has_sgnx sgn (fI 0)) (at_right 0)
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
LIM x at_right 0. fI x / fR x :> at_top
goal (1 subgoal):
1. jumpF_pathstart g z = 1 / 2
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
LIM x at_right 0. fI x / fR x :> at_top
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
LIM x at_right 0. fI x / fR x :> at_top
goal (1 subgoal):
1. jumpF_pathstart g z = 1 / 2
[PROOF STEP]
unfolding jumpF_pathstart_def fI_def fR_def jumpF_def
[PROOF STATE]
proof (prove)
using this:
LIM x at_right 0. Im (g x - z) / Re (g x - z) :> at_top
goal (1 subgoal):
1. (if LIM t at_right 0. Im (g t - z) / Re (g t - z) :> at_top then 1 / 2 else if LIM t at_right 0. Im (g t - z) / Re (g t - z) :> at_bot then - 1 / 2 else 0) = 1 / 2
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = 1 / 2
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = 1 / 2
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = 1 / 2
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have "jumpF_pathfinish g z = 1/2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathfinish g z = 1 / 2
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. jumpF_pathfinish g z = 1 / 2
[PROOF STEP]
have "fI 1>0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < fI 1
[PROOF STEP]
using loop that
[PROOF STATE]
proof (prove)
using this:
pathfinish g = pathstart g
0 < fI 0
goal (1 subgoal):
1. 0 < fI 1
[PROOF STEP]
unfolding path_defs fI_def
[PROOF STATE]
proof (prove)
using this:
g 1 = g 0
0 < Im (g 0 - z)
goal (1 subgoal):
1. 0 < Im (g 1 - z)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 < fI 1
goal (1 subgoal):
1. jumpF_pathfinish g z = 1 / 2
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
0 < fI 1
[PROOF STEP]
have "(LIM x at_left 1. fI x / fR x :> at_top)"
[PROOF STATE]
proof (prove)
using this:
0 < fI 1
goal (1 subgoal):
1. LIM x at_left 1. fI x / fR x :> at_top
[PROOF STEP]
apply (subst filterlim_divide_at_bot_at_top_iff[of _ "fI 1"])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. 0 < fI 1 \<Longrightarrow> (fI \<longlongrightarrow> fI 1) (at_left 1)
2. 0 < fI 1 \<Longrightarrow> fI 1 \<noteq> 0
3. 0 < fI 1 \<Longrightarrow> (fR \<longlongrightarrow> 0) (at_left 1) \<and> (fR has_sgnx sgn (fI 1)) (at_left 1)
[PROOF STEP]
using that fI fR[OF \<open>Re (g 0) = Re z\<close>]
[PROOF STATE]
proof (prove)
using this:
0 < fI 0
(fI \<longlongrightarrow> fI 0) (at_right 0)
(fI \<longlongrightarrow> fI 1) (at_left 1)
(fR \<longlongrightarrow> 0) (at_right 0)
(fR \<longlongrightarrow> 0) (at_left 1)
goal (3 subgoals):
1. 0 < fI 1 \<Longrightarrow> (fI \<longlongrightarrow> fI 1) (at_left 1)
2. 0 < fI 1 \<Longrightarrow> fI 1 \<noteq> 0
3. 0 < fI 1 \<Longrightarrow> (fR \<longlongrightarrow> 0) (at_left 1) \<and> (fR has_sgnx sgn (fI 1)) (at_left 1)
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
LIM x at_left 1. fI x / fR x :> at_top
goal (1 subgoal):
1. jumpF_pathfinish g z = 1 / 2
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
LIM x at_left 1. fI x / fR x :> at_top
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
LIM x at_left 1. fI x / fR x :> at_top
goal (1 subgoal):
1. jumpF_pathfinish g z = 1 / 2
[PROOF STEP]
unfolding jumpF_pathfinish_def fI_def fR_def jumpF_def
[PROOF STATE]
proof (prove)
using this:
LIM x at_left 1. Im (g x - z) / Re (g x - z) :> at_top
goal (1 subgoal):
1. (if LIM t at_left 1. Im (g t - z) / Re (g t - z) :> at_top then 1 / 2 else if LIM t at_left 1. Im (g t - z) / Re (g t - z) :> at_bot then - 1 / 2 else 0) = 1 / 2
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
jumpF_pathfinish g z = 1 / 2
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
jumpF_pathfinish g z = 1 / 2
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
jumpF_pathstart g z = 1 / 2
jumpF_pathfinish g z = 1 / 2
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
jumpF_pathstart g z = 1 / 2
jumpF_pathfinish g z = 1 / 2
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = jumpF_pathfinish g z
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
0 < fI 0 \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
0 < fI 0 \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have ?thesis when "fI 0<0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have "jumpF_pathstart g z = - 1/2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathstart g z = - 1 / 2
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. jumpF_pathstart g z = - 1 / 2
[PROOF STEP]
have "(LIM x at_right 0. fI x / fR x :> at_bot)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. LIM x at_right 0. fI x / fR x :> at_bot
[PROOF STEP]
apply (subst filterlim_divide_at_bot_at_top_iff[of _ "fI 0"])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. (fI \<longlongrightarrow> fI 0) (at_right 0)
2. fI 0 \<noteq> 0
3. (fR \<longlongrightarrow> 0) (at_right 0) \<and> (fR has_sgnx - sgn (fI 0)) (at_right 0)
[PROOF STEP]
using that fI fR[OF \<open>Re (g 0) = Re z\<close>]
[PROOF STATE]
proof (prove)
using this:
fI 0 < 0
(fI \<longlongrightarrow> fI 0) (at_right 0)
(fI \<longlongrightarrow> fI 1) (at_left 1)
(fR \<longlongrightarrow> 0) (at_right 0)
(fR \<longlongrightarrow> 0) (at_left 1)
goal (3 subgoals):
1. (fI \<longlongrightarrow> fI 0) (at_right 0)
2. fI 0 \<noteq> 0
3. (fR \<longlongrightarrow> 0) (at_right 0) \<and> (fR has_sgnx - sgn (fI 0)) (at_right 0)
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
LIM x at_right 0. fI x / fR x :> at_bot
goal (1 subgoal):
1. jumpF_pathstart g z = - 1 / 2
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
LIM x at_right 0. fI x / fR x :> at_bot
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
LIM x at_right 0. fI x / fR x :> at_bot
goal (1 subgoal):
1. jumpF_pathstart g z = - 1 / 2
[PROOF STEP]
unfolding jumpF_pathstart_def fI_def fR_def jumpF_def
[PROOF STATE]
proof (prove)
using this:
LIM x at_right 0. Im (g x - z) / Re (g x - z) :> at_bot
goal (1 subgoal):
1. (if LIM t at_right 0. Im (g t - z) / Re (g t - z) :> at_top then 1 / 2 else if LIM t at_right 0. Im (g t - z) / Re (g t - z) :> at_bot then - 1 / 2 else 0) = - 1 / 2
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = - 1 / 2
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = - 1 / 2
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = - 1 / 2
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have "jumpF_pathfinish g z = - 1/2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathfinish g z = - 1 / 2
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. jumpF_pathfinish g z = - 1 / 2
[PROOF STEP]
have "fI 1<0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fI 1 < 0
[PROOF STEP]
using loop that
[PROOF STATE]
proof (prove)
using this:
pathfinish g = pathstart g
fI 0 < 0
goal (1 subgoal):
1. fI 1 < 0
[PROOF STEP]
unfolding path_defs fI_def
[PROOF STATE]
proof (prove)
using this:
g 1 = g 0
Im (g 0 - z) < 0
goal (1 subgoal):
1. Im (g 1 - z) < 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
fI 1 < 0
goal (1 subgoal):
1. jumpF_pathfinish g z = - 1 / 2
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
fI 1 < 0
[PROOF STEP]
have "(LIM x at_left 1. fI x / fR x :> at_bot)"
[PROOF STATE]
proof (prove)
using this:
fI 1 < 0
goal (1 subgoal):
1. LIM x at_left 1. fI x / fR x :> at_bot
[PROOF STEP]
apply (subst filterlim_divide_at_bot_at_top_iff[of _ "fI 1"])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. fI 1 < 0 \<Longrightarrow> (fI \<longlongrightarrow> fI 1) (at_left 1)
2. fI 1 < 0 \<Longrightarrow> fI 1 \<noteq> 0
3. fI 1 < 0 \<Longrightarrow> (fR \<longlongrightarrow> 0) (at_left 1) \<and> (fR has_sgnx - sgn (fI 1)) (at_left 1)
[PROOF STEP]
using that fI fR[OF \<open>Re (g 0) = Re z\<close>]
[PROOF STATE]
proof (prove)
using this:
fI 0 < 0
(fI \<longlongrightarrow> fI 0) (at_right 0)
(fI \<longlongrightarrow> fI 1) (at_left 1)
(fR \<longlongrightarrow> 0) (at_right 0)
(fR \<longlongrightarrow> 0) (at_left 1)
goal (3 subgoals):
1. fI 1 < 0 \<Longrightarrow> (fI \<longlongrightarrow> fI 1) (at_left 1)
2. fI 1 < 0 \<Longrightarrow> fI 1 \<noteq> 0
3. fI 1 < 0 \<Longrightarrow> (fR \<longlongrightarrow> 0) (at_left 1) \<and> (fR has_sgnx - sgn (fI 1)) (at_left 1)
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
LIM x at_left 1. fI x / fR x :> at_bot
goal (1 subgoal):
1. jumpF_pathfinish g z = - 1 / 2
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
LIM x at_left 1. fI x / fR x :> at_bot
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
LIM x at_left 1. fI x / fR x :> at_bot
goal (1 subgoal):
1. jumpF_pathfinish g z = - 1 / 2
[PROOF STEP]
unfolding jumpF_pathfinish_def fI_def fR_def jumpF_def
[PROOF STATE]
proof (prove)
using this:
LIM x at_left 1. Im (g x - z) / Re (g x - z) :> at_bot
goal (1 subgoal):
1. (if LIM t at_left 1. Im (g t - z) / Re (g t - z) :> at_top then 1 / 2 else if LIM t at_left 1. Im (g t - z) / Re (g t - z) :> at_bot then - 1 / 2 else 0) = - 1 / 2
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
jumpF_pathfinish g z = - 1 / 2
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
jumpF_pathfinish g z = - 1 / 2
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
jumpF_pathstart g z = - 1 / 2
jumpF_pathfinish g z = - 1 / 2
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
jumpF_pathstart g z = - 1 / 2
jumpF_pathfinish g z = - 1 / 2
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = jumpF_pathfinish g z
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
fI 0 < 0 \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
fI 0 \<noteq> 0
0 < fI 0 \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
fI 0 < 0 \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
fI 0 \<noteq> 0
0 < fI 0 \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
fI 0 < 0 \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
by linarith
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = jumpF_pathfinish g z
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
Re (g 0) = Re z \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
goal (1 subgoal):
1. cindex_pathE g z = 0
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
cindex_pathE g z = jumpF_pathstart g z - jumpF_pathfinish g z
Re (g 0) \<noteq> Re z \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
Re (g 0) = Re z \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
cindex_pathE g z = jumpF_pathstart g z - jumpF_pathfinish g z
Re (g 0) \<noteq> Re z \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
Re (g 0) = Re z \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
goal (1 subgoal):
1. cindex_pathE g z = 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
cindex_pathE g z = 0
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
cindex_pathE g z = 0
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
Re (winding_number g z) = 0
cindex_pathE g z = 0
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Re (winding_number g z) = 0
cindex_pathE g z = 0
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
2 * Re (winding_number g z) = - cindex_pathE g z
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<forall>t\<in>{0<..<1}. Re z < Re (g t) \<Longrightarrow> 2 * Re (winding_number g z) = - cindex_pathE g z
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<forall>t\<in>{0<..<1}. Re z < Re (g t) \<Longrightarrow> 2 * Re (winding_number g z) = - cindex_pathE g z
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
have ?thesis when Re_neg:"\<forall>t\<in>{0<..<1}. Re (g t) < Re z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
have "Re (winding_number g z) = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Re (winding_number g z) = 0
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Re (winding_number g z) = 0
[PROOF STEP]
have "\<forall>p\<in>path_image g. Re z \<ge> Re p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>p\<in>path_image g. Re p \<le> Re z
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>p. p \<in> path_image g \<Longrightarrow> Re p \<le> Re z
[PROOF STEP]
fix p
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>p. p \<in> path_image g \<Longrightarrow> Re p \<le> Re z
[PROOF STEP]
assume "p \<in> path_image g"
[PROOF STATE]
proof (state)
this:
p \<in> path_image g
goal (1 subgoal):
1. \<And>p. p \<in> path_image g \<Longrightarrow> Re p \<le> Re z
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
p \<in> path_image g
[PROOF STEP]
obtain t where "0\<le>t" "t\<le>1" "p = g t"
[PROOF STATE]
proof (prove)
using this:
p \<in> path_image g
goal (1 subgoal):
1. (\<And>t. \<lbrakk>0 \<le> t; t \<le> 1; p = g t\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding path_image_def
[PROOF STATE]
proof (prove)
using this:
p \<in> g ` {0..1}
goal (1 subgoal):
1. (\<And>t. \<lbrakk>0 \<le> t; t \<le> 1; p = g t\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 \<le> t
t \<le> 1
p = g t
goal (1 subgoal):
1. \<And>p. p \<in> path_image g \<Longrightarrow> Re p \<le> Re z
[PROOF STEP]
have "Re z \<ge> Re (g t)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Re (g t) \<le> Re z
[PROOF STEP]
apply (rule continuous_le_on_closure[of "{0<..<1}" "\<lambda>t. Re (g t)" t "Re z",simplified])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. continuous_on {0..1} (\<lambda>x. Re (g x))
2. 0 \<le> t \<and> t \<le> 1
3. \<And>x. 0 < x \<and> x < 1 \<Longrightarrow> Re (g x) \<le> Re z
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_on {0..1} (\<lambda>x. Re (g x))
[PROOF STEP]
using valid_path_imp_path[OF \<open>valid_path g\<close>,unfolded path_def]
[PROOF STATE]
proof (prove)
using this:
continuous_on {0..1} g
goal (1 subgoal):
1. continuous_on {0..1} (\<lambda>x. Re (g x))
[PROOF STEP]
by (auto intro:continuous_intros)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. 0 \<le> t \<and> t \<le> 1
2. \<And>x. 0 < x \<and> x < 1 \<Longrightarrow> Re (g x) \<le> Re z
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<le> t \<and> t \<le> 1
[PROOF STEP]
using \<open>0\<le>t\<close> \<open>t\<le>1\<close>
[PROOF STATE]
proof (prove)
using this:
0 \<le> t
t \<le> 1
goal (1 subgoal):
1. 0 \<le> t \<and> t \<le> 1
[PROOF STEP]
by auto
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x. 0 < x \<and> x < 1 \<Longrightarrow> Re (g x) \<le> Re z
[PROOF STEP]
subgoal for x
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < x \<and> x < 1 \<Longrightarrow> Re (g x) \<le> Re z
[PROOF STEP]
using that[rule_format,of x]
[PROOF STATE]
proof (prove)
using this:
x \<in> {0<..<1} \<Longrightarrow> Re (g x) < Re z
goal (1 subgoal):
1. 0 < x \<and> x < 1 \<Longrightarrow> Re (g x) \<le> Re z
[PROOF STEP]
by auto
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
Re (g t) \<le> Re z
goal (1 subgoal):
1. \<And>p. p \<in> path_image g \<Longrightarrow> Re p \<le> Re z
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Re (g t) \<le> Re z
[PROOF STEP]
show "Re z \<ge> Re p"
[PROOF STATE]
proof (prove)
using this:
Re (g t) \<le> Re z
goal (1 subgoal):
1. Re p \<le> Re z
[PROOF STEP]
using \<open>p = g t\<close>
[PROOF STATE]
proof (prove)
using this:
Re (g t) \<le> Re z
p = g t
goal (1 subgoal):
1. Re p \<le> Re z
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Re p \<le> Re z
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<forall>p\<in>path_image g. Re p \<le> Re z
goal (1 subgoal):
1. Re (winding_number g z) = 0
[PROOF STEP]
from Re_winding_number_half_left[OF this \<open>valid_path g\<close> \<open>z\<notin>path_image g\<close>] loop
[PROOF STATE]
proof (chain)
picking this:
Re (winding_number g z) = (Im (Ln (z - pathfinish g)) - Im (Ln (z - pathstart g))) / (2 * pi)
pathfinish g = pathstart g
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Re (winding_number g z) = (Im (Ln (z - pathfinish g)) - Im (Ln (z - pathstart g))) / (2 * pi)
pathfinish g = pathstart g
goal (1 subgoal):
1. Re (winding_number g z) = 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Re (winding_number g z) = 0
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
Re (winding_number g z) = 0
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
Re (winding_number g z) = 0
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
have "cindex_pathE g z = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cindex_pathE g z = 0
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. cindex_pathE g z = 0
[PROOF STEP]
have "cindex_pathE g z = jumpF_pathstart g z - jumpF_pathfinish g z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cindex_pathE g z = jumpF_pathstart g z - jumpF_pathfinish g z
[PROOF STEP]
using index_ends[OF _ \<open>valid_path g\<close>] Re_neq \<open>s=0\<close>
[PROOF STATE]
proof (prove)
using this:
\<forall>t\<in>{0<..<1}. Re (g t) \<noteq> Re z \<Longrightarrow> cindex_pathE g z = jumpF_pathstart g z - jumpF_pathfinish g z
\<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z
s = 0
goal (1 subgoal):
1. cindex_pathE g z = jumpF_pathstart g z - jumpF_pathfinish g z
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
cindex_pathE g z = jumpF_pathstart g z - jumpF_pathfinish g z
goal (1 subgoal):
1. cindex_pathE g z = 0
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
cindex_pathE g z = jumpF_pathstart g z - jumpF_pathfinish g z
goal (1 subgoal):
1. cindex_pathE g z = 0
[PROOF STEP]
have "jumpF_pathstart g z = jumpF_pathfinish g z" when "Re (g 0) \<noteq> Re z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have "jumpF_pathstart g z = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathstart g z = 0
[PROOF STEP]
using jumpF_pathstart_eq_0[OF \<open>path g\<close>] that
[PROOF STATE]
proof (prove)
using this:
Re (pathstart g) \<noteq> Re ?z \<Longrightarrow> jumpF_pathstart g ?z = 0
Re (g 0) \<noteq> Re z
goal (1 subgoal):
1. jumpF_pathstart g z = 0
[PROOF STEP]
unfolding path_defs
[PROOF STATE]
proof (prove)
using this:
Re (g 0) \<noteq> Re ?z \<Longrightarrow> jumpF_pathstart g ?z = 0
Re (g 0) \<noteq> Re z
goal (1 subgoal):
1. jumpF_pathstart g z = 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = 0
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = 0
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have "jumpF_pathfinish g z=0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathfinish g z = 0
[PROOF STEP]
using jumpF_pathfinish_eq_0[OF \<open>path g\<close>] that loop
[PROOF STATE]
proof (prove)
using this:
Re (pathfinish g) \<noteq> Re ?z \<Longrightarrow> jumpF_pathfinish g ?z = 0
Re (g 0) \<noteq> Re z
pathfinish g = pathstart g
goal (1 subgoal):
1. jumpF_pathfinish g z = 0
[PROOF STEP]
unfolding path_defs
[PROOF STATE]
proof (prove)
using this:
Re (g 1) \<noteq> Re ?z \<Longrightarrow> jumpF_pathfinish g ?z = 0
Re (g 0) \<noteq> Re z
g 1 = g 0
goal (1 subgoal):
1. jumpF_pathfinish g z = 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
jumpF_pathfinish g z = 0
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
jumpF_pathstart g z = 0
jumpF_pathfinish g z = 0
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
jumpF_pathstart g z = 0
jumpF_pathfinish g z = 0
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = jumpF_pathfinish g z
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
Re (g 0) \<noteq> Re z \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
goal (1 subgoal):
1. cindex_pathE g z = 0
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
Re (g 0) \<noteq> Re z \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
goal (1 subgoal):
1. cindex_pathE g z = 0
[PROOF STEP]
have "jumpF_pathstart g z = jumpF_pathfinish g z" when "Re (g 0) = Re z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have [simp]:"(fR has_sgnx - 1) (at_right 0)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (fR has_sgnx - 1) (at_right 0)
[PROOF STEP]
unfolding fR_def has_sgnx_def eventually_at_right
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>b>0. \<forall>y>0. y < b \<longrightarrow> sgn (Re (g y - z)) = - 1
[PROOF STEP]
apply (rule exI[where x=1])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < 1 \<and> (\<forall>y>0. y < 1 \<longrightarrow> sgn (Re (g y - z)) = - 1)
[PROOF STEP]
using Re_neg
[PROOF STATE]
proof (prove)
using this:
\<forall>t\<in>{0<..<1}. Re (g t) < Re z
goal (1 subgoal):
1. 0 < 1 \<and> (\<forall>y>0. y < 1 \<longrightarrow> sgn (Re (g y - z)) = - 1)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(fR has_sgnx - 1) (at_right 0)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have [simp]:"(fR has_sgnx - 1) (at_left 1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (fR has_sgnx - 1) (at_left 1)
[PROOF STEP]
unfolding fR_def has_sgnx_def eventually_at_left
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>b<1. \<forall>y>b. y < 1 \<longrightarrow> sgn (Re (g y - z)) = - 1
[PROOF STEP]
apply (rule exI[where x=0])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < 1 \<and> (\<forall>y>0. y < 1 \<longrightarrow> sgn (Re (g y - z)) = - 1)
[PROOF STEP]
using Re_neg
[PROOF STATE]
proof (prove)
using this:
\<forall>t\<in>{0<..<1}. Re (g t) < Re z
goal (1 subgoal):
1. 0 < 1 \<and> (\<forall>y>0. y < 1 \<longrightarrow> sgn (Re (g y - z)) = - 1)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(fR has_sgnx - 1) (at_left 1)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have "fI 0\<noteq>0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fI 0 \<noteq> 0
[PROOF STEP]
proof (rule ccontr)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> fI 0 \<noteq> 0 \<Longrightarrow> False
[PROOF STEP]
assume "\<not> fI 0 \<noteq> 0"
[PROOF STATE]
proof (state)
this:
\<not> fI 0 \<noteq> 0
goal (1 subgoal):
1. \<not> fI 0 \<noteq> 0 \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<not> fI 0 \<noteq> 0
[PROOF STEP]
have "g 0 =z"
[PROOF STATE]
proof (prove)
using this:
\<not> fI 0 \<noteq> 0
goal (1 subgoal):
1. g 0 = z
[PROOF STEP]
using \<open>Re (g 0) = Re z\<close>
[PROOF STATE]
proof (prove)
using this:
\<not> fI 0 \<noteq> 0
Re (g 0) = Re z
goal (1 subgoal):
1. g 0 = z
[PROOF STEP]
unfolding fI_def
[PROOF STATE]
proof (prove)
using this:
\<not> Im (g 0 - z) \<noteq> 0
Re (g 0) = Re z
goal (1 subgoal):
1. g 0 = z
[PROOF STEP]
by (simp add: complex.expand)
[PROOF STATE]
proof (state)
this:
g 0 = z
goal (1 subgoal):
1. \<not> fI 0 \<noteq> 0 \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
g 0 = z
[PROOF STEP]
show False
[PROOF STATE]
proof (prove)
using this:
g 0 = z
goal (1 subgoal):
1. False
[PROOF STEP]
using \<open>z \<notin> path_image g\<close>
[PROOF STATE]
proof (prove)
using this:
g 0 = z
z \<notin> path_image g
goal (1 subgoal):
1. False
[PROOF STEP]
unfolding path_image_def
[PROOF STATE]
proof (prove)
using this:
g 0 = z
z \<notin> g ` {0..1}
goal (1 subgoal):
1. False
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
fI 0 \<noteq> 0
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
fI 0 \<noteq> 0
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have ?thesis when "fI 0>0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have "jumpF_pathstart g z = - 1/2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathstart g z = - 1 / 2
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. jumpF_pathstart g z = - 1 / 2
[PROOF STEP]
have "(LIM x at_right 0. fI x / fR x :> at_bot)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. LIM x at_right 0. fI x / fR x :> at_bot
[PROOF STEP]
apply (subst filterlim_divide_at_bot_at_top_iff[of _ "fI 0"])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. (fI \<longlongrightarrow> fI 0) (at_right 0)
2. fI 0 \<noteq> 0
3. (fR \<longlongrightarrow> 0) (at_right 0) \<and> (fR has_sgnx - sgn (fI 0)) (at_right 0)
[PROOF STEP]
using that fI fR[OF \<open>Re (g 0) = Re z\<close>]
[PROOF STATE]
proof (prove)
using this:
0 < fI 0
(fI \<longlongrightarrow> fI 0) (at_right 0)
(fI \<longlongrightarrow> fI 1) (at_left 1)
(fR \<longlongrightarrow> 0) (at_right 0)
(fR \<longlongrightarrow> 0) (at_left 1)
goal (3 subgoals):
1. (fI \<longlongrightarrow> fI 0) (at_right 0)
2. fI 0 \<noteq> 0
3. (fR \<longlongrightarrow> 0) (at_right 0) \<and> (fR has_sgnx - sgn (fI 0)) (at_right 0)
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
LIM x at_right 0. fI x / fR x :> at_bot
goal (1 subgoal):
1. jumpF_pathstart g z = - 1 / 2
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
LIM x at_right 0. fI x / fR x :> at_bot
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
LIM x at_right 0. fI x / fR x :> at_bot
goal (1 subgoal):
1. jumpF_pathstart g z = - 1 / 2
[PROOF STEP]
unfolding jumpF_pathstart_def fI_def fR_def jumpF_def
[PROOF STATE]
proof (prove)
using this:
LIM x at_right 0. Im (g x - z) / Re (g x - z) :> at_bot
goal (1 subgoal):
1. (if LIM t at_right 0. Im (g t - z) / Re (g t - z) :> at_top then 1 / 2 else if LIM t at_right 0. Im (g t - z) / Re (g t - z) :> at_bot then - 1 / 2 else 0) = - 1 / 2
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = - 1 / 2
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = - 1 / 2
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = - 1 / 2
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have "jumpF_pathfinish g z = - 1/2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathfinish g z = - 1 / 2
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. jumpF_pathfinish g z = - 1 / 2
[PROOF STEP]
have "fI 1>0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < fI 1
[PROOF STEP]
using loop that
[PROOF STATE]
proof (prove)
using this:
pathfinish g = pathstart g
0 < fI 0
goal (1 subgoal):
1. 0 < fI 1
[PROOF STEP]
unfolding path_defs fI_def
[PROOF STATE]
proof (prove)
using this:
g 1 = g 0
0 < Im (g 0 - z)
goal (1 subgoal):
1. 0 < Im (g 1 - z)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 < fI 1
goal (1 subgoal):
1. jumpF_pathfinish g z = - 1 / 2
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
0 < fI 1
[PROOF STEP]
have "(LIM x at_left 1. fI x / fR x :> at_bot)"
[PROOF STATE]
proof (prove)
using this:
0 < fI 1
goal (1 subgoal):
1. LIM x at_left 1. fI x / fR x :> at_bot
[PROOF STEP]
apply (subst filterlim_divide_at_bot_at_top_iff[of _ "fI 1"])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. 0 < fI 1 \<Longrightarrow> (fI \<longlongrightarrow> fI 1) (at_left 1)
2. 0 < fI 1 \<Longrightarrow> fI 1 \<noteq> 0
3. 0 < fI 1 \<Longrightarrow> (fR \<longlongrightarrow> 0) (at_left 1) \<and> (fR has_sgnx - sgn (fI 1)) (at_left 1)
[PROOF STEP]
using that fI fR[OF \<open>Re (g 0) = Re z\<close>]
[PROOF STATE]
proof (prove)
using this:
0 < fI 0
(fI \<longlongrightarrow> fI 0) (at_right 0)
(fI \<longlongrightarrow> fI 1) (at_left 1)
(fR \<longlongrightarrow> 0) (at_right 0)
(fR \<longlongrightarrow> 0) (at_left 1)
goal (3 subgoals):
1. 0 < fI 1 \<Longrightarrow> (fI \<longlongrightarrow> fI 1) (at_left 1)
2. 0 < fI 1 \<Longrightarrow> fI 1 \<noteq> 0
3. 0 < fI 1 \<Longrightarrow> (fR \<longlongrightarrow> 0) (at_left 1) \<and> (fR has_sgnx - sgn (fI 1)) (at_left 1)
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
LIM x at_left 1. fI x / fR x :> at_bot
goal (1 subgoal):
1. jumpF_pathfinish g z = - 1 / 2
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
LIM x at_left 1. fI x / fR x :> at_bot
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
LIM x at_left 1. fI x / fR x :> at_bot
goal (1 subgoal):
1. jumpF_pathfinish g z = - 1 / 2
[PROOF STEP]
unfolding jumpF_pathfinish_def fI_def fR_def jumpF_def
[PROOF STATE]
proof (prove)
using this:
LIM x at_left 1. Im (g x - z) / Re (g x - z) :> at_bot
goal (1 subgoal):
1. (if LIM t at_left 1. Im (g t - z) / Re (g t - z) :> at_top then 1 / 2 else if LIM t at_left 1. Im (g t - z) / Re (g t - z) :> at_bot then - 1 / 2 else 0) = - 1 / 2
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
jumpF_pathfinish g z = - 1 / 2
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
jumpF_pathfinish g z = - 1 / 2
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
jumpF_pathstart g z = - 1 / 2
jumpF_pathfinish g z = - 1 / 2
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
jumpF_pathstart g z = - 1 / 2
jumpF_pathfinish g z = - 1 / 2
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = jumpF_pathfinish g z
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
0 < fI 0 \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
0 < fI 0 \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have ?thesis when "fI 0<0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have "jumpF_pathstart g z = 1/2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathstart g z = 1 / 2
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. jumpF_pathstart g z = 1 / 2
[PROOF STEP]
have "(LIM x at_right 0. fI x / fR x :> at_top)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. LIM x at_right 0. fI x / fR x :> at_top
[PROOF STEP]
apply (subst filterlim_divide_at_bot_at_top_iff[of _ "fI 0"])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. (fI \<longlongrightarrow> fI 0) (at_right 0)
2. fI 0 \<noteq> 0
3. (fR \<longlongrightarrow> 0) (at_right 0) \<and> (fR has_sgnx sgn (fI 0)) (at_right 0)
[PROOF STEP]
using that fI fR[OF \<open>Re (g 0) = Re z\<close>]
[PROOF STATE]
proof (prove)
using this:
fI 0 < 0
(fI \<longlongrightarrow> fI 0) (at_right 0)
(fI \<longlongrightarrow> fI 1) (at_left 1)
(fR \<longlongrightarrow> 0) (at_right 0)
(fR \<longlongrightarrow> 0) (at_left 1)
goal (3 subgoals):
1. (fI \<longlongrightarrow> fI 0) (at_right 0)
2. fI 0 \<noteq> 0
3. (fR \<longlongrightarrow> 0) (at_right 0) \<and> (fR has_sgnx sgn (fI 0)) (at_right 0)
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
LIM x at_right 0. fI x / fR x :> at_top
goal (1 subgoal):
1. jumpF_pathstart g z = 1 / 2
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
LIM x at_right 0. fI x / fR x :> at_top
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
LIM x at_right 0. fI x / fR x :> at_top
goal (1 subgoal):
1. jumpF_pathstart g z = 1 / 2
[PROOF STEP]
unfolding jumpF_pathstart_def fI_def fR_def jumpF_def
[PROOF STATE]
proof (prove)
using this:
LIM x at_right 0. Im (g x - z) / Re (g x - z) :> at_top
goal (1 subgoal):
1. (if LIM t at_right 0. Im (g t - z) / Re (g t - z) :> at_top then 1 / 2 else if LIM t at_right 0. Im (g t - z) / Re (g t - z) :> at_bot then - 1 / 2 else 0) = 1 / 2
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = 1 / 2
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = 1 / 2
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = 1 / 2
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
have "jumpF_pathfinish g z = 1/2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. jumpF_pathfinish g z = 1 / 2
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. jumpF_pathfinish g z = 1 / 2
[PROOF STEP]
have "fI 1<0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fI 1 < 0
[PROOF STEP]
using loop that
[PROOF STATE]
proof (prove)
using this:
pathfinish g = pathstart g
fI 0 < 0
goal (1 subgoal):
1. fI 1 < 0
[PROOF STEP]
unfolding path_defs fI_def
[PROOF STATE]
proof (prove)
using this:
g 1 = g 0
Im (g 0 - z) < 0
goal (1 subgoal):
1. Im (g 1 - z) < 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
fI 1 < 0
goal (1 subgoal):
1. jumpF_pathfinish g z = 1 / 2
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
fI 1 < 0
[PROOF STEP]
have "(LIM x at_left 1. fI x / fR x :> at_top)"
[PROOF STATE]
proof (prove)
using this:
fI 1 < 0
goal (1 subgoal):
1. LIM x at_left 1. fI x / fR x :> at_top
[PROOF STEP]
apply (subst filterlim_divide_at_bot_at_top_iff[of _ "fI 1"])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. fI 1 < 0 \<Longrightarrow> (fI \<longlongrightarrow> fI 1) (at_left 1)
2. fI 1 < 0 \<Longrightarrow> fI 1 \<noteq> 0
3. fI 1 < 0 \<Longrightarrow> (fR \<longlongrightarrow> 0) (at_left 1) \<and> (fR has_sgnx sgn (fI 1)) (at_left 1)
[PROOF STEP]
using that fI fR[OF \<open>Re (g 0) = Re z\<close>]
[PROOF STATE]
proof (prove)
using this:
fI 0 < 0
(fI \<longlongrightarrow> fI 0) (at_right 0)
(fI \<longlongrightarrow> fI 1) (at_left 1)
(fR \<longlongrightarrow> 0) (at_right 0)
(fR \<longlongrightarrow> 0) (at_left 1)
goal (3 subgoals):
1. fI 1 < 0 \<Longrightarrow> (fI \<longlongrightarrow> fI 1) (at_left 1)
2. fI 1 < 0 \<Longrightarrow> fI 1 \<noteq> 0
3. fI 1 < 0 \<Longrightarrow> (fR \<longlongrightarrow> 0) (at_left 1) \<and> (fR has_sgnx sgn (fI 1)) (at_left 1)
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
LIM x at_left 1. fI x / fR x :> at_top
goal (1 subgoal):
1. jumpF_pathfinish g z = 1 / 2
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
LIM x at_left 1. fI x / fR x :> at_top
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
LIM x at_left 1. fI x / fR x :> at_top
goal (1 subgoal):
1. jumpF_pathfinish g z = 1 / 2
[PROOF STEP]
unfolding jumpF_pathfinish_def fI_def fR_def jumpF_def
[PROOF STATE]
proof (prove)
using this:
LIM x at_left 1. Im (g x - z) / Re (g x - z) :> at_top
goal (1 subgoal):
1. (if LIM t at_left 1. Im (g t - z) / Re (g t - z) :> at_top then 1 / 2 else if LIM t at_left 1. Im (g t - z) / Re (g t - z) :> at_bot then - 1 / 2 else 0) = 1 / 2
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
jumpF_pathfinish g z = 1 / 2
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
jumpF_pathfinish g z = 1 / 2
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
jumpF_pathstart g z = 1 / 2
jumpF_pathfinish g z = 1 / 2
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
jumpF_pathstart g z = 1 / 2
jumpF_pathfinish g z = 1 / 2
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = jumpF_pathfinish g z
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
fI 0 < 0 \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
fI 0 \<noteq> 0
0 < fI 0 \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
fI 0 < 0 \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
fI 0 \<noteq> 0
0 < fI 0 \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
fI 0 < 0 \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
goal (1 subgoal):
1. jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
by linarith
[PROOF STATE]
proof (state)
this:
jumpF_pathstart g z = jumpF_pathfinish g z
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
Re (g 0) = Re z \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
goal (1 subgoal):
1. cindex_pathE g z = 0
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
cindex_pathE g z = jumpF_pathstart g z - jumpF_pathfinish g z
Re (g 0) \<noteq> Re z \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
Re (g 0) = Re z \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
cindex_pathE g z = jumpF_pathstart g z - jumpF_pathfinish g z
Re (g 0) \<noteq> Re z \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
Re (g 0) = Re z \<Longrightarrow> jumpF_pathstart g z = jumpF_pathfinish g z
goal (1 subgoal):
1. cindex_pathE g z = 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
cindex_pathE g z = 0
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
cindex_pathE g z = 0
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
Re (winding_number g z) = 0
cindex_pathE g z = 0
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Re (winding_number g z) = 0
cindex_pathE g z = 0
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
2 * Re (winding_number g z) = - cindex_pathE g z
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<forall>t\<in>{0<..<1}. Re (g t) < Re z \<Longrightarrow> 2 * Re (winding_number g z) = - cindex_pathE g z
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
(\<forall>t\<in>{0<..<1}. Re z < Re (g t)) \<or> (\<forall>t\<in>{0<..<1}. Re (g t) < Re z)
\<forall>t\<in>{0<..<1}. Re z < Re (g t) \<Longrightarrow> 2 * Re (winding_number g z) = - cindex_pathE g z
\<forall>t\<in>{0<..<1}. Re (g t) < Re z \<Longrightarrow> 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(\<forall>t\<in>{0<..<1}. Re z < Re (g t)) \<or> (\<forall>t\<in>{0<..<1}. Re (g t) < Re z)
\<forall>t\<in>{0<..<1}. Re z < Re (g t) \<Longrightarrow> 2 * Re (winding_number g z) = - cindex_pathE g z
\<forall>t\<in>{0<..<1}. Re (g t) < Re z \<Longrightarrow> 2 * Re (winding_number g z) = - cindex_pathE g z
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
2 * Re (winding_number g z) = - cindex_pathE g z
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
s = 0 \<Longrightarrow> 2 * Re (winding_number g z) = - cindex_pathE g z
goal (1 subgoal):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
s = 0 \<Longrightarrow> 2 * Re (winding_number g z) = - cindex_pathE g z
goal (1 subgoal):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
have ?goal when "s\<noteq>0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
have "Re (g s) = Re z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Re (g s) = Re z
[PROOF STEP]
using \<open>s = 0 \<or> Re (g s) = Re z\<close> that
[PROOF STATE]
proof (prove)
using this:
s = 0 \<or> Re (g s) = Re z
s \<noteq> 0
goal (1 subgoal):
1. Re (g s) = Re z
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Re (g s) = Re z
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
define g' where "g' = shiftpath s g"
[PROOF STATE]
proof (state)
this:
g' = shiftpath s g
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
have "2 * Re (winding_number g' z) = - cindex_pathE g' z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 2 * Re (winding_number g' z) = - cindex_pathE g' z
[PROOF STEP]
proof (rule winding_number_cindex_pathE_aux)
[PROOF STATE]
proof (state)
goal (5 subgoals):
1. finite_ReZ_segments g' z
2. valid_path g'
3. z \<notin> path_image g'
4. Re (g' 1) = Re z
5. Re (g' 0) = Re z
[PROOF STEP]
show "Re (g' 1) = Re z" "Re (g' 0) = Re z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Re (g' 1) = Re z &&& Re (g' 0) = Re z
[PROOF STEP]
using \<open>Re (g s) = Re z\<close> \<open>s\<in>{0..<1}\<close> \<open>s\<noteq>0\<close>
[PROOF STATE]
proof (prove)
using this:
Re (g s) = Re z
s \<in> {0..<1}
s \<noteq> 0
goal (1 subgoal):
1. Re (g' 1) = Re z &&& Re (g' 0) = Re z
[PROOF STEP]
unfolding g'_def shiftpath_def
[PROOF STATE]
proof (prove)
using this:
Re (g s) = Re z
s \<in> {0..<1}
s \<noteq> 0
goal (1 subgoal):
1. Re (if s + 1 \<le> 1 then g (s + 1) else g (s + 1 - 1)) = Re z &&& Re (if s + 0 \<le> 1 then g (s + 0) else g (s + 0 - 1)) = Re z
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
Re (g' 1) = Re z
Re (g' 0) = Re z
goal (3 subgoals):
1. finite_ReZ_segments g' z
2. valid_path g'
3. z \<notin> path_image g'
[PROOF STEP]
show "valid_path g'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. valid_path g'
[PROOF STEP]
using valid_path_shiftpath[OF \<open>valid_path g\<close> loop,of s,folded g'_def] \<open>s\<in>{0..<1}\<close>
[PROOF STATE]
proof (prove)
using this:
s \<in> {0..1} \<Longrightarrow> valid_path g'
s \<in> {0..<1}
goal (1 subgoal):
1. valid_path g'
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
valid_path g'
goal (2 subgoals):
1. finite_ReZ_segments g' z
2. z \<notin> path_image g'
[PROOF STEP]
show "z \<notin> path_image g'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. z \<notin> path_image g'
[PROOF STEP]
using \<open>s \<in> {0..<1}\<close> assms(3) g'_def loop path_image_shiftpath
[PROOF STATE]
proof (prove)
using this:
s \<in> {0..<1}
z \<notin> path_image g
g' = shiftpath s g
pathfinish g = pathstart g
\<lbrakk>?a \<in> {0..1}; pathfinish ?g = pathstart ?g\<rbrakk> \<Longrightarrow> path_image (shiftpath ?a ?g) = path_image ?g
goal (1 subgoal):
1. z \<notin> path_image g'
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
z \<notin> path_image g'
goal (1 subgoal):
1. finite_ReZ_segments g' z
[PROOF STEP]
show "finite_ReZ_segments g' z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite_ReZ_segments g' z
[PROOF STEP]
using finite_ReZ_segments_shiftpah[OF \<open>finite_ReZ_segments g z\<close> _ \<open>path g\<close> loop] \<open>s\<in>{0..<1}\<close>
[PROOF STATE]
proof (prove)
using this:
?s \<in> {0..1} \<Longrightarrow> finite_ReZ_segments (shiftpath ?s g) z
s \<in> {0..<1}
goal (1 subgoal):
1. finite_ReZ_segments g' z
[PROOF STEP]
unfolding g'_def
[PROOF STATE]
proof (prove)
using this:
?s \<in> {0..1} \<Longrightarrow> finite_ReZ_segments (shiftpath ?s g) z
s \<in> {0..<1}
goal (1 subgoal):
1. finite_ReZ_segments (shiftpath s g) z
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
finite_ReZ_segments g' z
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
2 * Re (winding_number g' z) = - cindex_pathE g' z
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
2 * Re (winding_number g' z) = - cindex_pathE g' z
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
have "winding_number g' z = winding_number g z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. winding_number g' z = winding_number g z
[PROOF STEP]
unfolding g'_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. winding_number (shiftpath s g) z = winding_number g z
[PROOF STEP]
apply (rule winding_number_shiftpath[OF \<open>path g\<close> \<open>z \<notin> path_image g\<close> loop])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. s \<in> {0..1}
[PROOF STEP]
using \<open>s\<in>{0..<1}\<close>
[PROOF STATE]
proof (prove)
using this:
s \<in> {0..<1}
goal (1 subgoal):
1. s \<in> {0..1}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
winding_number g' z = winding_number g z
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
winding_number g' z = winding_number g z
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
have "cindex_pathE g' z = cindex_pathE g z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cindex_pathE g' z = cindex_pathE g z
[PROOF STEP]
unfolding g'_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cindex_pathE (shiftpath s g) z = cindex_pathE g z
[PROOF STEP]
apply (rule cindex_pathE_shiftpath[OF \<open>finite_ReZ_segments g z\<close> _ \<open>path g\<close> loop])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. s \<in> {0..1}
[PROOF STEP]
using \<open>s\<in>{0..<1}\<close>
[PROOF STATE]
proof (prove)
using this:
s \<in> {0..<1}
goal (1 subgoal):
1. s \<in> {0..1}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
cindex_pathE g' z = cindex_pathE g z
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
2 * Re (winding_number g' z) = - cindex_pathE g' z
winding_number g' z = winding_number g z
cindex_pathE g' z = cindex_pathE g z
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
2 * Re (winding_number g' z) = - cindex_pathE g' z
winding_number g' z = winding_number g z
cindex_pathE g' z = cindex_pathE g z
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
2 * Re (winding_number g z) = - cindex_pathE g z
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
s \<noteq> 0 \<Longrightarrow> 2 * Re (winding_number g z) = - cindex_pathE g z
goal (1 subgoal):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
s = 0 \<Longrightarrow> 2 * Re (winding_number g z) = - cindex_pathE g z
s \<noteq> 0 \<Longrightarrow> 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
have ?goal
[PROOF STATE]
proof (prove)
using this:
s = 0 \<Longrightarrow> 2 * Re (winding_number g z) = - cindex_pathE g z
s \<noteq> 0 \<Longrightarrow> 2 * Re (winding_number g z) = - cindex_pathE g z
goal (1 subgoal):
1. 2 * Re (winding_number g z) = - cindex_pathE g z
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
2 * Re (winding_number g z) = - cindex_pathE g z
goal (1 subgoal):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
2 * Re (winding_number g z) = - cindex_pathE g z
goal (1 subgoal):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
have "winding_number g z \<in> \<int>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. winding_number g z \<in> \<int>
[PROOF STEP]
using integer_winding_number[OF _ loop \<open>z\<notin>path_image g\<close>] valid_path_imp_path[OF \<open>valid_path g\<close>]
[PROOF STATE]
proof (prove)
using this:
path g \<Longrightarrow> winding_number g z \<in> \<int>
path g
goal (1 subgoal):
1. winding_number g z \<in> \<int>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
winding_number g z \<in> \<int>
goal (1 subgoal):
1. \<And>s. \<lbrakk>s \<in> {0..<1}; s = 0 \<or> Re (g s) = Re z; \<forall>t\<in>{s<..<1}. Re (g t) \<noteq> Re z; finite_ReZ_segments (subpath 0 s g) z\<rbrakk> \<Longrightarrow> winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
2 * Re (winding_number g z) = - cindex_pathE g z
winding_number g z \<in> \<int>
[PROOF STEP]
show "winding_number g z = - cindex_pathE g z / 2"
[PROOF STATE]
proof (prove)
using this:
2 * Re (winding_number g z) = - cindex_pathE g z
winding_number g z \<in> \<int>
goal (1 subgoal):
1. winding_number g z = complex_of_real (- cindex_pathE g z / 2)
[PROOF STEP]
by (metis add.right_neutral complex_eq complex_is_Int_iff mult_zero_right
nonzero_mult_div_cancel_left of_real_0 zero_neq_numeral)
[PROOF STATE]
proof (state)
this:
winding_number g z = complex_of_real (- cindex_pathE g z / 2)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 55223, "file": "Winding_Number_Eval_Cauchy_Index_Theorem", "length": 623}
|
import random
import numpy as np
def _lstsq_vector(A, b, constraints=None):
"""Minimize || A*x - b || subject to equality constraints x_i = c_i.
Let A be a matrix of shape (m, n) and b a vector of length m. This function
solves the minimization problem || A*x - b || for x, subject to 0 <= r <= n
equality constraints x_i = c_i. The n entries of the vector of constraints
must be either NaN (if there is no constraint for that entry) or a scalar.
Args:
A: Coefficient matrix.
b: Dependent variable.
constraints: Constraint vector.
Returns:
x: The minimizer of the problem.
"""
if constraints is None:
return np.linalg.lstsq(A, b, rcond=None)[0]
# Indices of the constraints on x
indices = np.nonzero(np.isfinite(constraints))[0]
# Values of the constraints on x
c = constraints[np.isfinite(constraints)]
# Number of constraints
r = c.size
n = A.shape[1]
# Matrix P.T projects (x_1, ..., x_n) to (x_i1, ..., x_ir)
# where ij are the indices of the constraints on x
P = np.zeros((n, r))
for i, x in enumerate(indices):
P[x][i] = 1
A00 = np.matmul(A.T, A)
A01 = P
A10 = P.T
A11 = np.zeros(2*(P.shape[1],))
# Augmented A
A_ = np.block([[A00, A01], [A10, A11]])
b0 = np.matmul(A.T, b)
b1 = c
# Augmented b
b_ = np.block([b0, b1])
# Solve the augmented system
x = np.linalg.lstsq(A_, b_, rcond=None)[0]
return x[:n]
def _lstsq_matrix(X, Y, constraints=None):
"""Minimize || A*X - Y || for A, subject to equality constraints a_ij = c_ij.
Let X, Y be matrices of shapes (n, k), (m, k) respectively, so that A is a
matrix of shape (m, n). This function solves the minimization problem
|| A*X - Y || for A, subject to 0 <= r <= m*n equality constraints a_ij = c_ij.
The entries of the (m, n) matrix of constraints must be either NaN (if there
is no constraint for that entry) or a scalar.
Args:
X: Input matrix.
Y: Output matrix.
constraints: Constraint matrix.
Returns:
A: The minimizer of the problem.
"""
if constraints is None:
return np.linalg.lstsq(X.T, Y.T, rcond=None)[0].T
A = np.empty((Y.shape[0], X.shape[0]))
for i in range(Y.shape[0]):
A[i] = _lstsq_vector(X.T, Y[i], constraints[i])
return A
def lstsq(a, b, constraints=None):
    """Minimize || a*x - b || for x, subject to equality constraints on the elements of x.

    Find the minimum of L(x) = || a*x - b ||, where a has shape (m, n) and b
    is either a length-m vector or an (m, k) matrix. The solution x is a
    length-n vector in the first case and an (n, k) matrix in the second.
    Up to n (resp. n*k) constraints of the form x_i = c_i (resp. x_ij = c_ij)
    may be supplied; entries of the constraints vector (resp. matrix) are NaN
    where unconstrained and a scalar where pinned.

    Args:
        a: Matrix of shape (m, n).
        b: Vector of shape (m,) or matrix of shape (m, k).
        constraints: Vector or matrix of constraints, or None.

    Returns:
        x: The least-squares solution.
    """
    is_vector_problem = b.ndim == 1
    if is_vector_problem:
        return _lstsq_vector(a, b, constraints)
    # Matrix case: a*x ~= b maps onto A*X ~= Y with A=x.T, X=a.T, Y=b.T.
    return _lstsq_matrix(a.T, b.T, constraints).T
def lstsq_ransac(a, b, num_iter, sample_size, min_num_inliers, tol, constraints=None):
    """RANSAC-robust version of `lstsq`.

    Repeatedly fits a constrained least-squares model on a random subset of
    the rows of (a, b), collects the remaining rows that agree with the
    candidate model within `tol`, and refits on the full consensus set when
    it is large enough. The model with the smallest consensus-set residual
    over all iterations is returned.

    Args:
        a: Matrix of shape (m, n).
        b: Vector of shape (m,) or matrix of shape (m, k).
        num_iter: Number of RANSAC iterations.
        sample_size: Number of rows drawn per iteration (capped at m).
        min_num_inliers: Minimum number of additional inliers required for a
            candidate model to be refit and considered.
        tol: Residual threshold below which a row counts as an inlier.
        constraints: Constraint vector/matrix forwarded to `lstsq`, or None.

    Returns:
        best_x: The best least-squares solution found, or None if no
            iteration produced at least `min_num_inliers` extra inliers.
    """
    best_x = None
    best_err = float('inf')
    n_rows = a.shape[0]
    for _ in range(num_iter):
        # Randomly select some distinct pairs of rows (a_i, b_i)
        sample = random.sample(range(n_rows), min(sample_size, n_rows))
        # Estimate a model with these pairs
        x = lstsq(a[sample], b[sample], constraints)
        # Keep every other pair that agrees with the candidate model
        also_inliers = [
            k for k in set(range(n_rows)).difference(sample)
            if np.linalg.norm(b[k] - np.matmul(a[k], x)) < tol
        ]
        # If there is enough data that agrees with this model
        if len(also_inliers) >= min_num_inliers:
            consensus = sample + also_inliers
            # Estimate a final model with all the consensus data
            x = lstsq(a[consensus], b[consensus], constraints)
            err = np.linalg.norm(b[consensus] - np.matmul(a[consensus], x))
            # Keep the model with the lowest consensus residual so far
            if err < best_err:
                best_x = x
                best_err = err
    return best_x
|
{"hexsha": "7c734063e137328a553aa2f23b3ef080c43c60d6", "size": 4771, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/cocktail/utils.py", "max_stars_repo_name": "marcromani/nica-nmf", "max_stars_repo_head_hexsha": "0a83fd2d1b90d0715929496a7a646c434120f296", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-12-12T18:45:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-25T07:02:14.000Z", "max_issues_repo_path": "src/cocktail/utils.py", "max_issues_repo_name": "marcromani/nica-nmf", "max_issues_repo_head_hexsha": "0a83fd2d1b90d0715929496a7a646c434120f296", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-12-13T13:57:03.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-13T14:18:03.000Z", "max_forks_repo_path": "src/cocktail/utils.py", "max_forks_repo_name": "marcromani/nica-nmf", "max_forks_repo_head_hexsha": "0a83fd2d1b90d0715929496a7a646c434120f296", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-12-13T13:57:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-17T05:17:41.000Z", "avg_line_length": 30.1962025316, "max_line_length": 92, "alphanum_fraction": 0.5933766506, "include": true, "reason": "import numpy", "num_tokens": 1325}
|
module SafeFlagPrimTrustMe where
open import Agda.Builtin.Equality
open import Agda.Builtin.TrustMe
|
{"hexsha": "694748867df72877104d8d5c7a74d7f114e53ede", "size": 101, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "test/Fail/SafeFlagPrimTrustMe.agda", "max_stars_repo_name": "bennn/agda", "max_stars_repo_head_hexsha": "f77b563d328513138d6c88bf0a3e350a9b91f8ed", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/Fail/SafeFlagPrimTrustMe.agda", "max_issues_repo_name": "bennn/agda", "max_issues_repo_head_hexsha": "f77b563d328513138d6c88bf0a3e350a9b91f8ed", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/Fail/SafeFlagPrimTrustMe.agda", "max_forks_repo_name": "bennn/agda", "max_forks_repo_head_hexsha": "f77b563d328513138d6c88bf0a3e350a9b91f8ed", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.2, "max_line_length": 33, "alphanum_fraction": 0.8613861386, "num_tokens": 27}
|
from __future__ import division
import numpy as np
from sklearn.svm import SVC
from scipy.special import expit
import copy
from scipy.stats import norm
from background_check import BackgroundCheck
class OcDecomposition(object):
    """Decomposition of a multi-class problem into per-class one-class models.

    One copy of ``base_estimator`` is fitted per class.  At prediction time
    per-class scores are compared against per-class thresholds (a percentile
    of the training scores); samples rejected by the active rule are assigned
    the extra label ``len(self._estimators)``.
    """
    def __init__(self, base_estimator=BackgroundCheck(),
                 normalization=None):
        # NOTE(review): the default estimator is a shared mutable instance,
        # but fit() deep-copies it before training, so the shared template is
        # never itself fitted.
        self._base_estimator = base_estimator
        self._estimators = []
        self._thresholds = []
        self._normalization = normalization
        self._priors = []
        self._means = []

    def fit(self, X, y, threshold_percentile=10, mus=None, ms=None):
        """Fit one estimator per class and derive rejection thresholds.

        Assumes class labels are consecutive integers 0..n_classes-1
        (X is masked with ``y == c_index``) -- TODO confirm with callers.
        """
        classes = np.unique(y)
        # np.alen() was deprecated in NumPy 1.18 and removed in 1.23;
        # len() is the exact replacement for sequence inputs.
        n_classes = len(classes)
        class_count = np.bincount(y)
        self._priors = class_count / len(y)
        for c_index in np.arange(n_classes):
            c = copy.deepcopy(self._base_estimator)
            c.fit(X[y == c_index])
            self._estimators.append(c)
        scores = self.score(X, mus=mus, ms=ms)
        self._thresholds = np.zeros(len(self._estimators))
        for c_index in np.arange(n_classes):
            # Percentile over *unique* scores so heavily repeated values do
            # not skew the threshold (and 0-thresholds are avoided).
            u = np.unique(scores[:, c_index])
            self._thresholds[c_index] = np.percentile(u, threshold_percentile)
        self._means = scores.mean(axis=0)

    def set_estimators(self, estimators, X, y, threshold_percentile=10,
                       mus=None, ms=None):
        """Install pre-fitted estimators and recompute priors/thresholds/means."""
        classes = np.unique(y)
        n_classes = len(classes)
        self._estimators = estimators
        class_count = np.bincount(y)
        self._priors = class_count / len(y)
        scores = self.score(X, mus=mus, ms=ms)
        self._thresholds = np.zeros(len(self._estimators))
        for c_index in np.arange(n_classes):
            u = np.unique(scores[:, c_index])
            self._thresholds[c_index] = np.percentile(u, threshold_percentile)
        self._means = scores.mean(axis=0)

    def score(self, X, mus=None, ms=None):
        """Return an (n_samples, n_estimators) score matrix.

        Returns None when the estimator is not a BackgroundCheck and the
        normalization is not "O-norm"/"T-norm" (preserved behaviour).
        """
        if type(self._base_estimator) is BackgroundCheck:
            return self.score_bc(X, mus=mus, ms=ms)
        elif self._normalization in ["O-norm", "T-norm"]:
            # Epsilon avoids 0-valued thresholds, which break O-norm division.
            return self.score_dens(X) + 1e-8

    def score_dens(self, X):
        """Density scores: exponentiated (log-)scores of every estimator."""
        n = len(X)
        scores = np.zeros((n, len(self._estimators)))
        for i, estimator in enumerate(self._estimators):
            s = np.exp(estimator.score(X))
            scores[range(n), i] = s
        return scores

    def score_bc(self, X, mus=None, ms=None):
        """BackgroundCheck scores: per-estimator foreground probabilities."""
        n = len(X)
        probas = np.zeros((n, len(self._estimators)))
        for i, estimator in enumerate(self._estimators):
            mu = None if mus is None else mus[i]
            m = None if ms is None else ms[i]
            probas[range(n), i] = estimator.predict_proba(X, mu=mu, m=m)[:, 1]
        return probas

    def predict(self, X, mus=None, ms=None):
        """Predict class indices; ``len(self._estimators)`` means 'rejected'."""
        scores = self.score(X, mus=mus, ms=ms)
        if type(self._base_estimator) is BackgroundCheck:
            return self.predict_bc(scores)
        elif self._normalization == "O-norm":
            return self.predict_o_norm(scores)
        elif self._normalization == "T-norm":
            return self.predict_t_norm(scores)

    def predict_o_norm(self, scores):
        """O-norm decision: scores are divided by the per-class thresholds."""
        reject = scores <= self._thresholds
        scores /= self._thresholds
        scores[reject] = -1
        max_scores = scores.max(axis=1)
        predictions = scores.argmax(axis=1)
        predictions[max_scores <= 1] = len(self._estimators)
        return predictions

    def predict_t_norm(self, scores):
        """T-norm decision: threshold-shifted scores scaled by class priors."""
        reject = scores <= self._thresholds
        scores -= self._thresholds
        means = self._means - self._thresholds
        scores = (scores / means) * self._priors
        scores[reject] = -np.inf
        max_scores = scores.max(axis=1)
        predictions = scores.argmax(axis=1)
        predictions[max_scores <= 0] = len(self._estimators)
        return predictions

    def predict_bc(self, scores):
        """BackgroundCheck decision: reject only if below every threshold."""
        reject = scores <= self._thresholds
        total_reject = (np.sum(reject, axis=1) == len(self._estimators))
        scores[reject] = -1
        predictions = scores.argmax(axis=1)
        predictions[total_reject] = len(self._estimators)
        return predictions

    def accuracy(self, X, y, mus=None, ms=None):
        """Mean 0/1 accuracy of predict() against the labels ``y``."""
        predictions = self.predict(X, mus=mus, ms=ms)
        return np.mean(predictions == y)

    @property
    def thresholds(self):
        # Read-only view of the fitted per-class thresholds.
        return self._thresholds
|
{"hexsha": "1181b6d93dd53d8b5a57092c1a20f57a6658ed1c", "size": 4953, "ext": "py", "lang": "Python", "max_stars_repo_path": "cwc/models/oc_decomposition.py", "max_stars_repo_name": "perellonieto/background_check", "max_stars_repo_head_hexsha": "a5b6549a62be276c7199e87e78a94a64af688ab9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-01-14T12:59:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-21T10:55:17.000Z", "max_issues_repo_path": "cwc/models/oc_decomposition.py", "max_issues_repo_name": "REFRAME/background_check", "max_issues_repo_head_hexsha": "7da967bad4a6d8cbc924b5301041f3c99ba39595", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cwc/models/oc_decomposition.py", "max_forks_repo_name": "REFRAME/background_check", "max_forks_repo_head_hexsha": "7da967bad4a6d8cbc924b5301041f3c99ba39595", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5227272727, "max_line_length": 80, "alphanum_fraction": 0.5867151221, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1189}
|
### run using 'marc' conda env ###
import pandas as pd
import numpy as np
import pickle

# --- Cohort attrition report: start from the full ADMISSIONS table ---------
# original ADMISSIONS
df = pd.read_pickle('/project/M-ABeICU176709/ABeICU/data/ADMISSIONS.pickle', compression = 'zip')
adm_original = len(df['ADMISSION_ID'].unique())
pt_original = len(df['PATIENT_ID'].unique())
print('original no. of admissions: ', adm_original)
print('original no. of patients: ', pt_original)
print()
print('--------------------------------------------------')
print()
# Filtering admissions with ICU LOS >= 24h
df = df[df['ICU_LOS_24H_FLAG'] == 1].reset_index(drop = True)
adm_24h = adm_original - len(df['ADMISSION_ID'].unique())
# NOTE(review): the patient-level subtraction assumes the excluded patients
# have no other qualifying admission -- TODO confirm.
pt_24h = pt_original - len(df['PATIENT_ID'].unique())
print('admissions with LOS < 24 hours: ', adm_24h)
print('patients with LOS < 24 hours: ', pt_24h)
print()
print('--------------------------------------------------')
print()
# Calculating ICU LOS
# Excluding outliers based on ICU LOS (top 2th percentile = '>30 days')
df['ICU_LOS'] = df.apply(lambda x: (x['ICU_DISCH_DATETIME'] - x['ICU_ADMIT_DATETIME']).days, axis = 1)
df = df[df['ICU_LOS'] < df['ICU_LOS'].quantile(0.98)].reset_index(drop = True)
adm_30d = adm_original - adm_24h - len(df['ADMISSION_ID'].unique())
pt_30d = pt_original - pt_24h - len(df['PATIENT_ID'].unique())
print('admissions with LOS > 30 days: ', adm_30d)
print('patients with LOS > 30 days: ', pt_30d)
print()
print('--------------------------------------------------')
print()
# calculating admissions with no registered delirium assessment
df = pd.read_pickle('/project/M-ABeICU176709/delirium/data/inputs/master/master_input.pickle',
                    compression = 'zip')
df = df[df['period'] >= 1].reset_index(drop = True)
df.dropna(subset = ['delirium_12h', 'delirium_24h'], inplace = True)
adm_nan = adm_original - adm_24h - adm_30d - len(df['ADMISSION_ID'].unique())
pt_nan = pt_original - pt_24h - pt_30d - len(df['PATIENT_ID'].unique())
print('admissions with no registered delirium assessment: ', adm_nan)
print('patients with no registered delirium assessment: ', pt_nan)
print()
print('--------------------------------------------------')
print()
n_incl_adm = len(df['ADMISSION_ID'].unique())
n_incl_pt = len(df['PATIENT_ID'].unique())
print('admissions included for analysis: ', n_incl_adm)
print('patients included for analysis: ', n_incl_pt)
print()
print('--------------------------------------------------')
print()
# Restrict to the included admissions that had at least one delirium episode.
admission_ids = list(df['ADMISSION_ID'].unique())
ADMISSION = pd.read_pickle('/project/M-ABeICU176709/ABeICU/data/ADMISSIONS.pickle', compression = 'zip')
ADMISSION = ADMISSION.loc[(ADMISSION['ADMISSION_ID'].isin(admission_ids)) & (ADMISSION['DELIRIUM_FLAG'] == 1)]
adm_wdel = len(ADMISSION)
pt_wdel = len(ADMISSION['PATIENT_ID'].unique())
print('admissions with at least one episode of delirium: ', adm_wdel)
print('patients with at least one episode of delirium: ', pt_wdel)
print('percentage admissions with at least one episode of delirium: ', adm_wdel / n_incl_adm)
# BUG FIX: this line prints a percentage but was labelled as a raw patient
# count (copy-paste from the line above).
print('percentage patients with at least one episode of delirium: ', pt_wdel / n_incl_pt)
print()
print('--------------------------------------------------')
print()
# --- Per-split (train / calibration / test) descriptive statistics ----------
#opt = 'sites'
opt = 'years'

def _load_pickle(pickle_path):
    """Unpickle *pickle_path*, closing the file handle promptly.

    The original ``pickle.load(open(...))`` pattern leaked file descriptors
    until garbage collection.
    """
    with open(pickle_path, 'rb') as fh:
        return pickle.load(fh)

df_train = pd.read_pickle(f'/project/M-ABeICU176709/delirium/data/revision/train_{opt}.pickle', compression = 'zip')
df_calibration = pd.read_pickle(f'/project/M-ABeICU176709/delirium/data/revision/calibration_{opt}.pickle', compression = 'zip')
df_test = pd.read_pickle(f'/project/M-ABeICU176709/delirium/data/revision/test_{opt}.pickle', compression = 'zip')
y_12h_train = _load_pickle(f'/project/M-ABeICU176709/delirium/data/revision/preprocessed/y_12h_train_{opt}.pickle')
y_24h_train = _load_pickle(f'/project/M-ABeICU176709/delirium/data/revision/preprocessed/y_24h_train_{opt}.pickle')
y_12h_calibration = _load_pickle(f'/project/M-ABeICU176709/delirium/data/revision/preprocessed/y_12h_calibration_{opt}.pickle')
y_24h_calibration = _load_pickle(f'/project/M-ABeICU176709/delirium/data/revision/preprocessed/y_24h_calibration_{opt}.pickle')
y_12h_test = _load_pickle(f'/project/M-ABeICU176709/delirium/data/revision/preprocessed/y_12h_test_{opt}.pickle')
y_24h_test = _load_pickle(f'/project/M-ABeICU176709/delirium/data/revision/preprocessed/y_24h_test_{opt}.pickle')
ADMISSION = pd.read_pickle('/project/M-ABeICU176709/ABeICU/data/ADMISSIONS.pickle', compression = 'zip')
ADMISSION.set_index('ADMISSION_ID', inplace = True)
# train
adm_ids_train = df_train['ADMISSION_ID'].unique()
no_pt_train = len(df_train['PATIENT_ID'].unique())
no_adm_train = len(df_train['ADMISSION_ID'].unique())
no_adm_wdel_train = len(ADMISSION.loc[(ADMISSION.index.isin(adm_ids_train)) & (ADMISSION['DELIRIUM_FLAG'] == 1) ])
perc_adm_wdel_train = no_adm_wdel_train / no_adm_train
no_pt_wdel_train = len(ADMISSION.loc[(ADMISSION.index.isin(adm_ids_train)) & (ADMISSION['DELIRIUM_FLAG'] == 1)]['PATIENT_ID'].unique())
perc_pt_wdel_train = no_pt_wdel_train / no_pt_train
no_inst_train = len(y_12h_train)
no_inst_wdel_12h_train = y_12h_train.sum()
perc_inst_wdel_12h_train = no_inst_wdel_12h_train / no_inst_train
no_inst_wdel_24h_train = y_24h_train.sum()
perc_inst_wdel_24h_train = no_inst_wdel_24h_train / no_inst_train
# calibration
adm_ids_calibration = df_calibration['ADMISSION_ID'].unique()
no_pt_calibration = len(df_calibration['PATIENT_ID'].unique())
no_adm_calibration = len(df_calibration['ADMISSION_ID'].unique())
no_adm_wdel_calibration = len(ADMISSION.loc[(ADMISSION.index.isin(adm_ids_calibration)) & (ADMISSION['DELIRIUM_FLAG'] == 1) ])
perc_adm_wdel_calibration = no_adm_wdel_calibration / no_adm_calibration
no_pt_wdel_calibration = len(ADMISSION.loc[(ADMISSION.index.isin(adm_ids_calibration)) & (ADMISSION['DELIRIUM_FLAG'] == 1)]['PATIENT_ID'].unique())
perc_pt_wdel_calibration = no_pt_wdel_calibration / no_pt_calibration
no_inst_calibration = len(y_12h_calibration)
no_inst_wdel_12h_calibration = y_12h_calibration.sum()
perc_inst_wdel_12h_calibration = no_inst_wdel_12h_calibration / no_inst_calibration
no_inst_wdel_24h_calibration = y_24h_calibration.sum()
perc_inst_wdel_24h_calibration = no_inst_wdel_24h_calibration / no_inst_calibration
# test
adm_ids_test = df_test['ADMISSION_ID'].unique()
if opt == 'years':
    # temporal split: drop admissions that also appear in the train years
    adm_ids_test = [i for i in adm_ids_test if i not in adm_ids_train]
    df_test = df_test.loc[df_test['ADMISSION_ID'].isin(adm_ids_test)]
no_pt_test = len(df_test['PATIENT_ID'].unique())
no_adm_test = len(df_test['ADMISSION_ID'].unique())
no_adm_wdel_test = len(ADMISSION.loc[(ADMISSION.index.isin(adm_ids_test)) & (ADMISSION['DELIRIUM_FLAG'] == 1) ])
perc_adm_wdel_test = no_adm_wdel_test / no_adm_test
no_pt_wdel_test = len(ADMISSION.loc[(ADMISSION.index.isin(adm_ids_test)) & (ADMISSION['DELIRIUM_FLAG'] == 1)]['PATIENT_ID'].unique())
perc_pt_wdel_test = no_pt_wdel_test / no_pt_test
no_inst_test = len(y_12h_test)
no_inst_wdel_12h_test = y_12h_test.sum()
perc_inst_wdel_12h_test = no_inst_wdel_12h_test / no_inst_test
no_inst_wdel_24h_test = y_24h_test.sum()
perc_inst_wdel_24h_test = no_inst_wdel_24h_test / no_inst_test
#####################################################
print('patients included in the train data set: ', no_pt_train)
print('admissions included in the train data set: ', no_adm_train)
print('patients with at least one episode of delirium: ', no_pt_wdel_train, perc_pt_wdel_train)
# BUG FIX: 'admmissions' typo corrected in the three labels below.
print('admissions with at least one episode of delirium: ', no_adm_wdel_train, perc_adm_wdel_train)
print('prediction instances included for analysis: ', no_inst_train)
print('with delirium in 0-12 hours: ', no_inst_wdel_12h_train)
print('percentage with delirium in 0-12 hours: ', no_inst_wdel_12h_train / no_inst_train)
print('with delirium in 12-24 hours: ', no_inst_wdel_24h_train)
print('percentage with delirium in 12-24 hours: ', no_inst_wdel_24h_train / no_inst_train)
print()
print('--------------------------------------------------')
print()
print('patients included in the calibration data set: ', no_pt_calibration)
print('admissions included in the calibration data set: ', no_adm_calibration)
print('patients with at least one episode of delirium: ', no_pt_wdel_calibration, perc_pt_wdel_calibration)
print('admissions with at least one episode of delirium: ', no_adm_wdel_calibration, perc_adm_wdel_calibration)
print('prediction instances included for analysis: ', no_inst_calibration)
print('with delirium in 0-12 hours: ', no_inst_wdel_12h_calibration)
print('percentage with delirium in 0-12 hours: ', no_inst_wdel_12h_calibration / no_inst_calibration)
print('with delirium in 12-24 hours: ', no_inst_wdel_24h_calibration)
print('percentage with delirium in 12-24 hours: ', no_inst_wdel_24h_calibration / no_inst_calibration)
print()
print('--------------------------------------------------')
print()
print('patients included in the test data set: ', no_pt_test)
print('admissions included in the test data set: ', no_adm_test)
print('patients with at least one episode of delirium: ', no_pt_wdel_test, perc_pt_wdel_test)
print('admissions with at least one episode of delirium: ', no_adm_wdel_test, perc_adm_wdel_test)
print('prediction instances included for analysis: ', no_inst_test)
print('with delirium in 0-12 hours: ', no_inst_wdel_12h_test)
print('percentage with delirium in 0-12 hours: ', no_inst_wdel_12h_test / no_inst_test)
print('with delirium in 12-24 hours: ', no_inst_wdel_24h_test)
print('percentage with delirium in 12-24 hours: ', no_inst_wdel_24h_test / no_inst_test)
print()
print('--------------------------------------------------')
print()
|
{"hexsha": "97d85ac719d9639d2d41b5734e9dc85380ccab47", "size": 9579, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/revision/12_supplemental_3.py", "max_stars_repo_name": "data-intelligence-for-health-lab/delirium_prediction", "max_stars_repo_head_hexsha": "a0a25819ef6c98e32563b4e3b986c1a26fc30ed7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/revision/12_supplemental_3.py", "max_issues_repo_name": "data-intelligence-for-health-lab/delirium_prediction", "max_issues_repo_head_hexsha": "a0a25819ef6c98e32563b4e3b986c1a26fc30ed7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/revision/12_supplemental_3.py", "max_forks_repo_name": "data-intelligence-for-health-lab/delirium_prediction", "max_forks_repo_head_hexsha": "a0a25819ef6c98e32563b4e3b986c1a26fc30ed7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.5139664804, "max_line_length": 147, "alphanum_fraction": 0.7328531162, "include": true, "reason": "import numpy", "num_tokens": 2705}
|
"""configurator class allows to load any python file by its filename
and store the contents in a namespace
namespace elements are accessible throught both key access or member acess"""
import runpy
from os import path
from collections import OrderedDict
from weakref import WeakKeyDictionary
import numpy
# Per-NestedDict option storage; weak keys let configurations be collected.
meta = WeakKeyDictionary()
# Names injected into the namespace of every loaded configuration file.
init_globals = dict( percent=0.01, numpy=numpy )
def set_option(nd, option, value):
    """Set a NestedDict option.

    Known options: ``verbose``, ``createmissing``,
    ``split`` (TODO: make splitting by dot optional).
    """
    options = meta[nd]
    options[option] = value
def process_key(key):
    """Split a key into ``(first, rest)``.

    A dotted string or a (possibly one-level-nested) list/tuple yields its
    first component plus a tuple of the remaining ones; anything else is
    returned unchanged with ``rest=None``.
    """
    if isinstance(key, str) and '.' in key:
        head, *tail = key.split('.')
        return head, tuple(tail)
    if isinstance(key, (list, tuple)):
        flat = []
        for piece in key:
            if isinstance(piece, (list, tuple)):
                flat.extend(piece)
            else:
                flat.append(piece)
        if flat:
            return flat[0], tuple(flat[1:])
    return key, None
class NestedDict(object):
    """Ordered nested dictionary with dotted-key and attribute access.

    Keys containing '.' (or given as lists/tuples) address values in nested
    NestedDict instances.  Per-instance options (verbose, createmissing) live
    in the module-level ``meta`` registry; child dicts keep a ``__parent__``
    backlink.
    """
    # Backlink to the enclosing NestedDict (None for a root).
    __parent__ = None
    def __init__(self, iterable=None, **kwargs):
        # Use object.__setattr__: this class aliases __setattr__ to set().
        super(NestedDict, self).__setattr__('__storage__', OrderedDict())
        meta[self] = dict()
        if iterable:
            if isinstance(iterable, dict):
                # Plain dicts are imported in sorted-key order.
                iterable = sorted(iterable.items())
            self.__import__(OrderedDict(iterable))
        if kwargs:
            self.__import__(OrderedDict(sorted(kwargs.items())))
    def __repr__(self):
        return self.__storage__.__repr__().replace('OrderedDict(', 'NestedDict(', 1)
    def __str__(self, margin='', nested=False, width=None):
        """Pretty-print as an indented '${...}' block.

        With ``nested=True`` keys are flattened to dotted paths; ``width``
        pads the key column.
        """
        if not self.__bool__():
            return '${}'
        res='${\n'
        margin+='  '
        for k, v in self.items(nested=nested):
            if nested:
                k = '.'.join(k)
            if width is None:
                res+='{margin}{key} : '.format(margin=margin, key=k)
            else:
                res+='{margin}{key:{width}} : '.format(margin=margin, key=k, width=width)
            if isinstance( v, NestedDict ):
                res+=v.__str__(margin, nested)
            elif isinstance( v, str ):
                res+=repr(v)
            else:
                res+=str(v)
            res+=',\n'
        margin=margin[:-2]
        return res+margin+'}'
    def __bool__(self):
        return bool(self.__storage__)
    def __len__(self):
        return len(self.__storage__)
    def __dir__(self):
        # Expose stored keys, e.g. for interactive tab completion.
        return dir(type(self)) + list(self.keys())
    def _set_parent(self, parent):
        # object.__setattr__ again: plain assignment would go into __storage__.
        super(NestedDict, self).__setattr__('__parent__', parent)
    def parent(self, n=1):
        """Return the n-th ancestor (n=0 is self); raise when unavailable."""
        if n==1:
            return self.__parent__
        if n==0:
            return self
        if n<0:
            raise Exception('Invalid parent depth')
        if self.__parent__ is None:
            raise Exception('No parent')
        return self.__parent__.parent( n-1 )
    def parent_key(self):
        """Return the key under which self is stored in its parent (by identity)."""
        if self.__parent__ is None:
            return None
        for k, v in self.__parent__.items():
            if v is self:
                return k
        raise KeyError( "Failed to determine own key in the parent dictionary" )
    def get(self, key, *args, **kwargs):
        """dict.get with dotted-key support.

        An optional ``types=`` keyword validates the returned value's type.
        """
        key, rest=process_key(key)
        if rest:
            sub = self.__storage__.get(key)
            if sub is None:
                if args:
                    # Positional default, as for dict.get.
                    return args[0]
                raise KeyError( "No nested key '%s'"%key )
            return sub.get( rest, *args, **kwargs )
        types=kwargs.pop('types', None)
        if key==():
            obj = self
        else:
            obj=self.__storage__.get(key, *args, **kwargs)
        if types:
            if not isinstance(obj, types):
                if isinstance(types, tuple):
                    raise Exception('The field "{}" is expected to be of one of types {}, not {}'.format(key, str([t.__name__ for t in types]), type(obj).__name__))
                else:
                    raise Exception('The field "{}" is expected to be of type {}, not {}'.format(key, types.__name__, type(obj).__name__))
        return obj
    def __getitem__(self, key):
        key, rest=process_key(key)
        if rest:
            return self.__storage__.__getitem__(key).__getitem__( rest )
        if key==():
            return self
        try:
            return self.__storage__.__getitem__(key)
        except KeyError as e:
            # Optionally auto-create missing nested dicts (see set_option).
            if meta[self].get('createmissing', False):
                return self(key)
            raise
    __getattr__ = __getitem__
    def set(self, key, value):
        """Assign value, creating intermediate NestedDicts for dotted keys."""
        if isinstance(value, dict):
            value = NestedDict(value)
        if isinstance(value, NestedDict):
            value._set_parent( self )
        key, rest=process_key(key)
        if rest:
            if not key in self.__storage__:
                cfg = self.__storage__[key]=NestedDict()
                cfg._set_parent( self )
                return cfg.set( rest, value )
            return self.__storage__.get(key).set( rest, value )
        self.__storage__[key] = value
    __setattr__ = set
    __setitem__= set
    def setdefault(self, key, value):
        """dict.setdefault with dotted-key support."""
        if isinstance(value, dict):
            value = NestedDict(value)
        if isinstance(value, NestedDict):
            value._set_parent( self )
        key, rest=process_key(key)
        if rest:
            if not key in self.__storage__:
                cfg = self.__storage__[key]=NestedDict()
                cfg._set_parent( self )
                return cfg.setdefault( rest, value )
            return self.__storage__.get(key).setdefault( rest, value )
        return self.__storage__.setdefault(key, value)
    def __iter__(self):
        return iter(self.__storage__)
    def values(self, nested=False):
        """Iterate values; ``nested=True`` recurses and yields only leaves."""
        for v in self.__storage__.values():
            if nested and isinstance(v, NestedDict):
                for nv in v.values(nested=True):
                    yield nv
            else:
                yield v
    def items(self, nested=False):
        """Iterate (key, value); ``nested=True`` yields tuple-path keys of leaves."""
        if nested:
            for k, v in self.__storage__.items():
                if isinstance(v, NestedDict):
                    for nk, nv in v.items(nested=True):
                        yield (k,)+nk, nv
                else:
                    yield (k,), v
        else:
            for k, v in self.__storage__.items():
                yield k, v
    def keys(self, nested=False):
        """Iterate keys; ``nested=True`` yields tuple paths of leaves."""
        if nested:
            for k, v in self.__storage__.items():
                if isinstance(v, NestedDict):
                    for nk in v.keys(nested=True):
                        yield (k,)+nk
                else:
                    yield k,
        else:
            for k in self.__storage__.keys():
                yield k
    def __contains__(self, key):
        key, rest=process_key(key)
        if not self.__storage__.__contains__(key):
            return False
        if rest:
            return self.__storage__.get(key).__contains__(rest)
        return True
    def __call__(self, key):
        """Return the nested NestedDict at ``key``, creating it when missing."""
        if isinstance( key, (list, tuple) ):
            key, rest = key[0], key[1:]
            if rest:
                if not key in self.__storage__:
                    cfg = self.__storage__[key]=NestedDict()
                    cfg._set_parent( self )
                    return cfg.__call__( rest )
                return self.__storage__.get(key).__call__(rest)
        if isinstance( key, str ):
            if '.' in key:
                return self.__call__(key.split('.'))
        other = self.__storage__.get(key, None)
        if other is None:
            value = self.__storage__[key] = NestedDict()
            value._set_parent( self )
            return value
        if isinstance(other, NestedDict):
            return other
        raise KeyError( "Can not create nested configuration as soon as soon as the key '%s' already exists"%key )
    def __load__(self, filename, subst=[]):
        """Execute configuration file(s) and import their namespaces.

        ``subst`` may be a list of values or a dict(key=..., values=...) used
        to format ``filename`` into several candidates; subst='default'
        expands to the standard config/config_local locations.  After the
        first successful load, remaining candidates become optional.
        """
        if subst:
            if subst=='default':
                dirname = path.dirname(__file__)
                tokens = dirname.split('/')
                gna_basedir = '/'.join(tokens[:-2])
                default_confs = [gna_basedir+'/config', gna_basedir+'/config_local']
                subst = dict( key='location', values=default_confs )
            if type(subst) in [ list, tuple ]:
                filenames = [ filename.format( s ) for s in subst ]
            elif isinstance(subst, dict):
                filenames = [ filename.format( **{ subst['key']: v } ) for v in subst['values'] ]
            else:
                raise Exception( "Unsupported 'subst' type "+type(subst).__name__.__repr__() )
        else:
            filenames = [ filename ]
        unimportant = False
        for filename in filenames:
            if unimportant and not path.isfile( filename ):
                if meta[self].get('verbose', False):
                    print( 'Skipping nonexistent file', filename )
                continue
            dic = self.__load_dic__(filename, dictonly=True)
            self.__import__(dic)
            unimportant = True
    def __load_dic__(self, filename, dictonly=False):
        """Run a python file and return its globals, minus the injected
        ``init_globals`` helpers, as a dict or NestedDict."""
        print('Loading config file:', filename)
        dic = runpy.run_path(filename, init_globals )
        for k in init_globals:
            if dic[k]==init_globals[k]:
                del dic[k]
        if dictonly:
            return dic
        return NestedDict(dic)
    def __import__(self, dic):
        """Copy ``dic`` entries into self, skipping dunder keys."""
        for k, v in dic.items():
            if isinstance(k, str) and k.startswith('__'):
                continue
            if meta[self].get('verbose', False):
                if k in self:
                    print( 'Reset', k, 'to', v.__repr__() )
                else:
                    print( 'Set', k, 'to', v.__repr__() )
            self.__setattr__(k, v)
def __prefetch_covariances(dic, cov_pathes=()):
    """Load every covariance module found in *cov_pathes* into ``dic``.

    Each file in a covariance directory is executed with ``runpy`` and must
    define ``name``; its remaining globals are stored under
    ``dic['covariances'][name]``.
    """
    import os
    for cov_path in cov_pathes:
        for cov_file in os.listdir( cov_path ):
            print("Importing covariance from {} ".format(cov_file) )
            module_path = path.join( cov_path, cov_file )
            loaded = runpy.run_path( module_path )
            if not dic.get( 'covariances', None ):
                dic['covariances'] = NestedDict()
            try:
                name = loaded.pop( 'name' )
                dic['covariances'][name] = dict( loaded )
            except KeyError:
                # BUG FIX: the message previously formatted the `os.path`
                # module object instead of the offending file path.
                print( 'Failed to extract covariance from {}.'
                       ' Check the naming conventions'.format(module_path) )
def configurator(filename=None, dic={}, **kwargs):
    """Build a NestedDict from a python config file or a plain dict.

    Keyword options: ``debug`` enables verbose imports, ``prefetch``
    (default True) loads covariance modules listed under 'covariance_path',
    remaining kwargs are forwarded to the loader.
    """
    cfg = NestedDict()
    prefetch = kwargs.pop( 'prefetch', True )
    if filename:
        cfg['@loaded_from'] = filename
    meta[cfg]['verbose'] = kwargs.pop( 'debug', False )
    if filename:
        cfg.__load__( filename, **kwargs )
    elif dic:
        cfg.__import__( dic )
    if prefetch:
        __prefetch_covariances( dic=cfg, cov_pathes=cfg.get('covariance_path', []) )
    return cfg
class uncertain(object):
    """A central value with an associated uncertainty.

    ``mode`` is one of 'absolute', 'relative', 'percent' (stored as
    relative), 'fixed' or 'free'.  The shorthand
    ``uncertain(central, mode[, label])`` may be used for the modes that
    carry no uncertainty value ('fixed'/'free').
    """
    def __init__(self, central, uncertainty=None, mode='', label=''):
        if isinstance(uncertainty, str):
            # Shorthand form: shift the arguments one slot to the right.
            uncertainty, mode, label = None, uncertainty, mode
        assert mode in ['absolute', 'relative', 'percent', 'fixed', 'free'], 'Unsupported uncertainty mode '+mode
        assert (mode in ['fixed', 'free'])==(uncertainty is None), 'Inconsistent mode and uncertainty'
        if mode=='percent':
            # Percent uncertainties are kept as relative fractions.
            mode = 'relative'
            uncertainty = uncertainty*0.01
        if mode=='relative':
            assert central!=0, 'Central value should differ from 0 for relative uncertainty'
        self.central = central
        self.uncertainty = uncertainty
        self.mode = mode
        self.label = label
    def get_unc(self):
        """Return the uncertainty around 1.0 (None if fixed, inf if free)."""
        mode = self.mode
        if mode=='fixed':
            return None
        if mode=='free':
            return float('inf')
        if mode=='relative':
            relative = self.uncertainty
        elif mode=='absolute':
            relative = self.uncertainty/self.central
        else:
            raise Exception('Unsupported mode '+self.mode)
        return uncertain(1.0, relative, mode='absolute')
    def __str__(self):
        """Format as 'central±sigma [relsigma%]' (central only when fixed)."""
        text = '{central:.6g}'.format(central=self.central)
        if self.mode=='fixed':
            return text
        if self.mode=='relative':
            sigma = self.central*self.uncertainty
            relsigma = self.uncertainty
        else:
            sigma = self.uncertainty
            relsigma = sigma/self.central
        text += '±{sigma:.6g}'.format(sigma=sigma)
        if self.central:
            text += ' [{relsigma:.6g}%]'.format(relsigma=relsigma*100.0)
        return text
    def __repr__(self):
        return 'uncertain({central!r}, {uncertainty!r}, {mode!r})'.format( **self.__dict__ )
def uncertaindict(*args, **kwargs):
    """Build an OrderedDict of ``uncertain`` values.

    The keyword arguments ``central``, ``uncertainty``, ``mode`` and
    ``label`` act as defaults shared by every entry; per-entry values
    (scalars, tuples or dicts) supply whatever was not given globally.
    """
    shared = {}
    leftover = []
    for field in ('central', 'uncertainty', 'mode', 'label'):
        if field in kwargs:
            shared[field] = kwargs.pop(field)
        else:
            leftover.append(field)
    result = OrderedDict( *args, **kwargs )
    for name, spec in result.items():
        params = dict(shared)
        if isinstance(spec, dict):
            params.update( spec )
        else:
            if isinstance( spec, (int, float) ):
                spec = (spec, )
            # Scalars/tuples fill the missing fields positionally.
            params.update( zip( leftover, spec ) )
        result[name] = uncertain( **params )
    return result
# Expose the helpers inside every loaded configuration file's namespace.
init_globals['load'] = configurator
init_globals['uncertain'] = uncertain
init_globals['uncertaindict'] = uncertaindict
|
{"hexsha": "518997b15048ea4e603eabc100276f64915a3e0d", "size": 13652, "ext": "py", "lang": "Python", "max_stars_repo_path": "pylib/gna/configurator.py", "max_stars_repo_name": "gnafit/gna", "max_stars_repo_head_hexsha": "c1a58dac11783342c97a2da1b19c97b85bce0394", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-10-14T01:06:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-02T16:33:06.000Z", "max_issues_repo_path": "pylib/gna/configurator.py", "max_issues_repo_name": "gnafit/gna", "max_issues_repo_head_hexsha": "c1a58dac11783342c97a2da1b19c97b85bce0394", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pylib/gna/configurator.py", "max_forks_repo_name": "gnafit/gna", "max_forks_repo_head_hexsha": "c1a58dac11783342c97a2da1b19c97b85bce0394", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8868778281, "max_line_length": 164, "alphanum_fraction": 0.5410928802, "include": true, "reason": "import numpy", "num_tokens": 3049}
|
"""
insert_location(ex::Expr, location)
Insert a symbolic representation of `location` into the arguments of an `expression`.
Used in the `@at` macro for specifying the location of an `AbstractOperation`.
"""
function insert_location!(ex::Expr, location)
if ex.head === :call && ex.args[1] ∈ operators
push!(ex.args, ex.args[end])
ex.args[3:end-1] .= ex.args[2:end-2]
ex.args[2] = location
end
for arg in ex.args
insert_location!(arg, location)
end
return nothing
end
"Fallback for when `insert_location` is called on objects other than expressions."
insert_location!(anything, location) = nothing
"""
@at location abstract_operation
Modify the `abstract_operation` so that it returns values at
`location`, where `location` is a 3-tuple of `Face`s and `Cell`s.
"""
macro at(location, abstract_operation)
insert_location!(abstract_operation, location)
return esc(abstract_operation)
end
|
{"hexsha": "7dd87f464360bd40b953a47df4dc7b8e6cccb833", "size": 961, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/AbstractOperations/at.jl", "max_stars_repo_name": "ascheinb/Oceananigans.jl", "max_stars_repo_head_hexsha": "52bfeb09e3562f639deb32b8807f32a88e3a1cfa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-02T05:32:51.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-02T05:32:51.000Z", "max_issues_repo_path": "src/AbstractOperations/at.jl", "max_issues_repo_name": "ssghost/Oceananigans.jl", "max_issues_repo_head_hexsha": "a8ea2555c36bdc7a29161d6cb692d8bdf6290156", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/AbstractOperations/at.jl", "max_forks_repo_name": "ssghost/Oceananigans.jl", "max_forks_repo_head_hexsha": "a8ea2555c36bdc7a29161d6cb692d8bdf6290156", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4571428571, "max_line_length": 85, "alphanum_fraction": 0.7013527575, "num_tokens": 230}
|
import numpy as np
from .abstruct import Channel
from ..util import pishifts
class DepolarizingChannel(Channel):
    """Depolarizing noise: each qubit suffers X, Y or Z with probability p/3.

    The sampled error is written into the symplectic vector
    ``_channel_output["E"]`` (first n entries = X part, last n = Z part);
    a Y error sets both halves.  ``p`` may be a scalar or a list of rates.
    """
    def __init__(self, n, p, seed=None):
        super().__init__(n, seed)
        if not isinstance(p, (list, np.ndarray)):
            # Allow a single scalar error rate.
            p = [p]
        self.param_len = len(p)
        self._channel_parameter["p"] = p

    def channel(self, n=-1, ind=0):
        """Sample one error pattern for parameter index ``ind``.

        ``n > 0`` overrides the channel length before sampling.
        """
        self._channel_output["E"] *= 0  # clear the previous error pattern
        if n > 0:
            self.n = n
        r = np.random.random(self.n)
        p = self._channel_parameter["p"][ind]
        # Partition [0, p) into three equal slices for X, Y and Z errors.
        x_pos = np.where(r <= p / 3)[0]
        z_pos = np.intersect1d(np.where(r < p)[0], np.where(r > 2 * p / 3)[0])
        y_pos = np.intersect1d(np.where(r < 2 * p / 3)[0], np.where(r > p / 3)[0])
        self._channel_output["E"][x_pos] = 1  # X
        self._channel_output["E"][self.n + z_pos] = 1  # Z
        self._channel_output["E"][y_pos] = 1  # Y
        self._channel_output["E"][self.n + y_pos] = 1  # Y
        return self._channel_output

    def get_param(self, ind):
        """Return [P(I), P(X), P(Y), P(Z)] for parameter index ``ind``."""
        # NOTE(review): accesses `channel_parameter` (no underscore) —
        # presumably a property of the Channel base class; confirm.
        p = self.channel_parameter["p"][ind]
        return [1 - p, p / 3, p / 3, p / 3]
class BitFlipChannel(Channel):
    """Fixed-weight flip channel: exactly tx X-flips and tz Z-flips per call.

    ``channel()`` places the flips at the start of the X/Z halves of the
    error vector and then shuffles each half independently.
    """
    def __init__(self, n, tx, tz, seed=None):
        super().__init__(n, seed)
        if not isinstance(tx, (list, np.ndarray)):
            tx = np.array([tx])
        if not isinstance(tz, (list, np.ndarray)):
            tz = np.array([tz])
        # Plain lists accepted by the first check still need conversion.
        if not isinstance(tx, np.ndarray):
            tx = np.array(tx)
        if not isinstance(tz, np.ndarray):
            tz = np.array(tz)
        self.param_len = min(len(tx), len(tz))
        self._channel_parameter["tx"] = tx
        self._channel_parameter["tz"] = tz
        # Per-qubit flip rates implied by the fixed counts.
        self.px = self._channel_parameter["tx"] / n
        self.pz = self._channel_parameter["tz"] / n

    def channel(self, n=-1, ind=0):
        """Sample an error with exactly tx[ind] X and tz[ind] Z flips.

        ``n > 0`` overrides the channel length (px/pz are recomputed).
        """
        self._channel_output["E"] *= 0
        if n > 0:
            self.n = n
            self.px = self._channel_parameter["tx"] / self.n
            self.pz = self._channel_parameter["tz"] / self.n
        (self._channel_output["E"][: self.n])[: self._channel_parameter["tx"][ind]] = 1
        (self._channel_output["E"][self.n :])[: self._channel_parameter["tz"][ind]] = 1
        # Shuffle each half in place to randomize the flip positions.
        np.random.shuffle(self._channel_output["E"][: self.n])
        np.random.shuffle(self._channel_output["E"][self.n :])
        return self._channel_output

    def get_param(self, ind):
        """Return [P(I), P(Z), P(X), P(XZ)] implied by the px/pz rates."""
        return [
            (1 - self.px[ind]) * (1 - self.pz[ind]),
            (1 - self.px[ind]) * self.pz[ind],
            (1 - self.pz[ind]) * self.px[ind],
            self.px[ind] * self.pz[ind],
        ]
class PauliChannel(Channel):
    """Independent X/Z Pauli channel.

    Each qubit gets an X error with probability px and, independently,
    a Z error with probability pz (both on the same qubit amounts to Y).
    """

    def __init__(self, n, px, pz, seed=None):
        """
        Parameters
        ----------
        n : int
            Number of physical qubits.
        px, pz : float or list or np.ndarray
            Per-qubit X / Z error probabilities (or several, selectable
            by index).
        seed :
            Forwarded to the base class.
        """
        super().__init__(n, seed)
        # Normalize scalars/lists to 1-D ndarrays BEFORE storing, so both
        # parameters end up in the same form. (The original stored ``px``
        # before converting it, leaving a plain list in the dict while
        # ``pz`` was stored as an ndarray.)
        if not isinstance(px, (list, np.ndarray)):
            px = [px]
        if not isinstance(pz, (list, np.ndarray)):
            pz = [pz]
        px = np.asarray(px)
        pz = np.asarray(pz)
        self.param_len = min(len(px), len(pz))
        self._channel_parameter["px"] = px
        self._channel_parameter["pz"] = pz

    def channel(self, n=-1, ind=0):
        """Sample one error pattern; X and Z errors are drawn independently."""
        self._channel_output["E"] *= 0
        if n > 0:
            self.n = n
        x_pos = np.where(
            np.random.random(self.n) <= self._channel_parameter["px"][ind]
        )[0]
        z_pos = np.where(
            np.random.random(self.n) <= self._channel_parameter["pz"][ind]
        )[0]
        self._channel_output["E"][x_pos] = 1  # X
        self._channel_output["E"][self.n + z_pos] = 1  # Z
        return self._channel_output

    def get_param(self, ind):
        """Return [(no error), (X only), (Z only), (X and Z)] probabilities.

        Bug fix: ``px`` and ``pz`` are already scalars after indexing with
        ``ind``; the original indexed them a second time (``px[ind]``,
        ``pz[ind]``) in the last two entries, which raises at runtime.
        """
        px = self._channel_parameter["px"][ind]
        pz = self._channel_parameter["pz"][ind]
        return [
            (1 - px) * (1 - pz),
            (1 - pz) * px,
            (1 - px) * pz,
            px * pz,
        ]
class GaussianQuantumChannel(Channel):
    """Gaussian displacement channel producing analog syndrome information.

    Draws a real-valued shift for each of the 2n quadrature entries from
    N(0, sigma^2) and derives a binary error pattern from the shifts.
    """

    def __init__(self, n, sigma, seed=None, bit_flip=True, phase_flip=True):
        super().__init__(n, seed)
        # Normalize scalar/list sigma to a 1-D ndarray so indexed access
        # with ``ind`` works uniformly.
        if not isinstance(sigma, (list, np.ndarray)):
            sigma = np.array([sigma])
        if not isinstance(sigma, (np.ndarray)):
            sigma = np.array(sigma)
        self._channel_parameter["sigma"] = sigma
        self._bit_flip = bit_flip
        self._phase_flip = phase_flip
        self.param_len = len(sigma)

    def channel(self, n=-1, ind=0):
        """Sample Gaussian shifts ("DELTA") and the induced binary errors ("E").

        NOTE(review): when bit_flip/phase_flip is False the corresponding
        half of "DELTA" is NOT resampled, so values from a previous call
        survive — confirm this is intentional.
        """
        if n > 0:
            self.n = n
        if self.bit_flip:
            self._channel_output["DELTA"][: self.n] = np.random.normal(
                scale=self.channel_parameter["sigma"][ind], size=2 * self.n
            )[: self.n]
        if self.phase_flip:
            self._channel_output["DELTA"][self.n :] = np.random.normal(
                scale=self.channel_parameter["sigma"][ind], size=2 * self.n
            )[self.n :]
        self._channel_output["E"] *= 0
        # A shift counts as an error when its reduced magnitude reaches
        # sqrt(pi)/2 (presumably pishifts folds each shift into one
        # sqrt(pi) period, GKP-style — TODO confirm against util.pishifts;
        # the original comment "2√π>|E|>√π" did not match this threshold).
        delta = pishifts(self.channel_output["DELTA"])
        e_pos = np.where(np.abs(delta) >= np.sqrt(np.pi) / 2)[0]
        self._channel_output["E"][e_pos] = 1
        return self.channel_output

    def get_param(self, ind):
        """Return the standard deviation sigma for parameter set ``ind``."""
        return self._channel_parameter["sigma"][ind]

    @property
    def bit_flip(self):
        # Whether the first n entries of "DELTA" get resampled.
        return self._bit_flip

    @property
    def phase_flip(self):
        # Whether the last n entries of "DELTA" get resampled.
        return self._phase_flip

    @property
    def n(self):
        # Number of physical qubits; the setter resizes both output buffers.
        return self._n

    @n.setter
    def n(self, n):
        if n <= 0:
            raise ValueError("PHYSICAL_QUBIT is more than 0")
        self._n = n
        # "E": binary symplectic error vector; "DELTA": analog shifts.
        self._channel_output["E"] = np.zeros(2 * n, dtype="i1")
        self._channel_output["DELTA"] = np.zeros(2 * n)
|
{"hexsha": "8b758d736699a3b89ce194e0ebe3696c95c2474d", "size": 5731, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyqecc/channel/channel.py", "max_stars_repo_name": "shim98a/pyqecc", "max_stars_repo_head_hexsha": "69965b4cab947718bf42adfaf79f63b25da61f66", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyqecc/channel/channel.py", "max_issues_repo_name": "shim98a/pyqecc", "max_issues_repo_head_hexsha": "69965b4cab947718bf42adfaf79f63b25da61f66", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2022-01-17T13:57:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-23T06:39:45.000Z", "max_forks_repo_path": "pyqecc/channel/channel.py", "max_forks_repo_name": "shim98a/pyqecc", "max_forks_repo_head_hexsha": "69965b4cab947718bf42adfaf79f63b25da61f66", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-02-11T14:51:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T13:20:28.000Z", "avg_line_length": 34.1130952381, "max_line_length": 87, "alphanum_fraction": 0.5391729192, "include": true, "reason": "import numpy", "num_tokens": 1581}
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 29 11:34:48 2020
@author: roume
"""
import os, cv2
import numpy as np
import matplotlib as plot
from skimage.measure import label, regionprops
from scipy.special import expit as sigmoid # used for numerical stability
# Hard-coded, machine-specific I/O locations; adjust before running.
inputPath = 'C:/Users/roume/PycharmProjects/ECE276A_Project1_StopSignDetector/venv/validset'
outputPath = 'C:/Users/roume/PycharmProjects/ECE276A_Project1_StopSignDetector/venv/validset/masks2'
# Trained classifier weights over [1, B, G, R] pixel features (cv2.imread
# presumably yields BGR channel order — confirm).
# NOTE(review): only the LAST assignment takes effect; the first two are
# kept as a record of earlier training runs.
weights = np.array([ -262.96844267, -2173.87815346, -5326.3292497, 5020.39252529]) # trained on only the training set, 50/50 red/notRed, 500000 pixels total
weights = np.array([-1051.98, 7793.46, -17668, 7497.26]) # trained on both training and validation, used problematic pixels for notRed dataset, 600000 pixels total
weights = np.array([-1198.70681915, 2975.83862542, -19176.06813495, 9368.96722851]) # removed a third of the red pixels so the entire dataset would be evenly split 50/50
os.chdir(inputPath)
filelist = os.listdir(inputPath)
# Iterate over a copy (filelist[:]) so non-JPEG entries can be removed from
# the original list while looping.
for file in filelist[:]:
    if not(file.endswith(".jpg")):
        filelist.remove(file)
    else:
        img = cv2.imread(file)
        # Flatten the HxWxC image into (H*W, C) pixel rows.
        x = img.flatten().reshape(img.shape[0] * img.shape[1], img.shape[2])
        # Prepend a column of ones to pick up the intercept weight.
        intercept = np.ones((x.shape[0], 1))
        x = np.concatenate((intercept, x), axis = 1)
        # Linear score per pixel, reshaped back to image dimensions.
        mask = np.matmul(x, weights)
        mask = mask.reshape(img.shape[0], img.shape[1])
        # mask = 1.0 * (mask > 0) # comment this out if outputing; other programs read binary images as grayscale, so they appear black
        # Preview each (unthresholded) score map, then write it out.
        cv2.imshow('', mask)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        os.chdir(outputPath)
        cv2.imwrite(file, mask)
        os.chdir(inputPath)
|
{"hexsha": "bdb951f06959d17340bf2f4be5aea7fbad654db2", "size": 1777, "ext": "py", "lang": "Python", "max_stars_repo_path": "pr1_code/stop_sign_test_imgs.py", "max_stars_repo_name": "roumenguha/Stop_Sign_Detection_Redux", "max_stars_repo_head_hexsha": "30e00b9a2726b81a531a5d51e007272c9b207171", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-04-18T00:00:53.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-18T00:00:56.000Z", "max_issues_repo_path": "pr1_code/stop_sign_test_imgs.py", "max_issues_repo_name": "roumenguha/Stop_Sign_Detection_Redux", "max_issues_repo_head_hexsha": "30e00b9a2726b81a531a5d51e007272c9b207171", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pr1_code/stop_sign_test_imgs.py", "max_forks_repo_name": "roumenguha/Stop_Sign_Detection_Redux", "max_forks_repo_head_hexsha": "30e00b9a2726b81a531a5d51e007272c9b207171", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8085106383, "max_line_length": 169, "alphanum_fraction": 0.6702307259, "include": true, "reason": "import numpy,from scipy", "num_tokens": 499}
|
import os
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from sklearn.datasets import make_spd_matrix
from sklearn.covariance import empirical_covariance
from sklearn.metrics import mean_squared_error
from torch.utils.data import DataLoader
import numpy as np
from synthetic import train_nn
from scipy.special import logsumexp
from sklearn.linear_model import RidgeCV
import wandb
class SyntheticMomentsDataset(Dataset):
    """Synthetic dataset mapping sample moments to a nonlinear scalar target.

    Each of ``N`` items draws ``n_samples`` points from a random
    ``n_dim``-dimensional distribution and exposes as features the
    per-dimension variances plus one empirical covariance entry.
    """

    def __init__(self, N=1000, n_samples=500, n_dim=2, output_names=None, distribution='normal', random_state=0):
        """
        Parameters
        ----------
        N : int
            Number of items.
        n_samples : int
            Points drawn per item.
        n_dim : int
            Dimensionality of each point.
        output_names :
            Informational only (printed).
        distribution : str
            One of 'normal' | 't' | 'gamma'.
        random_state : int
            Seed for the per-item sample draws.
        """
        self.N = N
        self.n_samples = n_samples
        self.n_dim = n_dim
        self.Xs = []
        self.ys = []
        print('Dataset output', output_names)
        # Bug fix: seed the generator ONCE, outside the loop. The original
        # rebuilt np.random.RandomState(random_state) on every iteration,
        # so all N items reused the very same underlying random draws.
        rng = np.random.RandomState(random_state)
        for n in range(N):
            if distribution == "normal":
                # Distribution parameters (mean, SPD covariance) still come
                # from the global RNG, as in the original.
                cov = make_spd_matrix(self.n_dim)
                X = rng.multivariate_normal(np.random.randn(self.n_dim), cov, size=self.n_samples, check_valid='warn', tol=1e-8)
            elif distribution == "t":
                X = rng.standard_t(np.random.randint(10, 20, size=self.n_dim), size=(self.n_samples, self.n_dim))
            elif distribution == "gamma":
                X = rng.gamma(np.random.randint(1, 30, size=self.n_dim), np.random.randint(1, 30, size=self.n_dim), size=(self.n_samples, self.n_dim))
            stds = np.std(X, axis=0)
            # Single off-diagonal entry of the empirical covariance matrix.
            covariances = np.array(empirical_covariance(X)[0, 1]).reshape(1, 1)
            moments = [np.square(stds.ravel()), covariances.ravel()]
            moments = np.concatenate(moments).ravel()
            self.Xs.append(np.array(moments))
            # Target: cov^2 / 2 * logsumexp over the per-dimension stds.
            y = [np.square(covariances) / 2 * logsumexp(stds, axis=0).ravel()]
            self.ys.append(np.array(y))

    def __getitem__(self, index):
        """Return (features, target, positional index column) for one item."""
        return self.Xs[index], self.ys[index], np.arange(len(self.ys[index])).reshape(-1, 1)

    def __len__(self):
        return self.N
if __name__ == "__main__":
    import argparse

    import wandb

    parser = argparse.ArgumentParser(description='Results summary')
    parser.add_argument('--name', type=str)
    parser.add_argument('--seed_weights', default=0, type=int)
    parser.add_argument('--seed_dataset', default=0, type=int)
    parser.add_argument('--distribution', default='normal', help='normal|gamma|t', type=str)
    parser.add_argument('--wandb_test', action='store_true')
    # Bug fix: was ``default='store_true'`` (a string default); the flag is
    # clearly meant to be a boolean switch.
    parser.add_argument('--cpu', action='store_true')
    # Bug fix: both options are consumed below (args.sample_size,
    # args.features) but were never declared, so the script crashed with
    # AttributeError. Defaults mirror SyntheticMomentsDataset's defaults.
    parser.add_argument('--sample_size', default=500, type=int)
    parser.add_argument('--features', default=2, type=int)
    # dummy, placeholder for future
    parser.add_argument('--output_name', default='cov-var-function')
    args = parser.parse_args()

    # Route runs to a scratch project when only smoke-testing wandb.
    if args.wandb_test:
        wandb.init(project='wandb_test')
    else:
        if args.name:
            wandb.init(project='synthetic-moments1', name=args.name)
        else:
            wandb.init(project='synthetic-moments1')

    # Materialize train/test moment features and targets as flat arrays.
    train = SyntheticMomentsDataset(10000, args.sample_size, args.features, args.output_name, args.distribution, args.seed_dataset)
    test = SyntheticMomentsDataset(1000, args.sample_size, args.features, args.output_name, args.distribution, args.seed_dataset)
    X_tr = np.array([train[i][0] for i in range(len(train))])
    X_ts = np.array([test[i][0] for i in range(len(test))])
    Y_tr = np.array([train[i][1] for i in range(len(train))])
    Y_ts = np.array([test[i][1] for i in range(len(test))])

    # Ridge regression with built-in cross-validated alpha selection.
    mdl = RidgeCV()
    mdl.fit(X_tr, Y_tr)
    wandb.run.summary["best_tr_loss"] = mean_squared_error(Y_tr, mdl.predict(X_tr))
    wandb.run.summary["best_ts_loss"] = mean_squared_error(Y_ts, mdl.predict(X_ts))
|
{"hexsha": "a2bc36165b2a8d929b6b36b9802511ed4fc804fe", "size": 3692, "ext": "py", "lang": "Python", "max_stars_repo_path": "synthetic_moments_ridge.py", "max_stars_repo_name": "veronicatozzo/distribution-network", "max_stars_repo_head_hexsha": "585a3294f09ade975d921e28576c24007e5a41de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "synthetic_moments_ridge.py", "max_issues_repo_name": "veronicatozzo/distribution-network", "max_issues_repo_head_hexsha": "585a3294f09ade975d921e28576c24007e5a41de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "synthetic_moments_ridge.py", "max_forks_repo_name": "veronicatozzo/distribution-network", "max_forks_repo_head_hexsha": "585a3294f09ade975d921e28576c24007e5a41de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.4352941176, "max_line_length": 182, "alphanum_fraction": 0.6625135428, "include": true, "reason": "import numpy,from scipy", "num_tokens": 913}
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 11 02:15:26 2018
@author: AshwinAmbal
Description:The code below is used to extract 'k' similar location for a given
location based on the tf, df or idf values as specified in the sample inputs file.
"""
import xml.etree.ElementTree as ET
import pandas as pd
from scipy.spatial.distance import cosine
#from scipy.spatial.distance import euclidean
#from sklearn.metrics.pairwise import cosine_distances
# Parse the topics XML that maps location numbers to location titles.
tree = ET.parse("C:\\MWDB Project\\devset\\devset_topics.xml")
# get root element
root = tree.getroot()
# iterate location items
location_id = dict()
for item in root.findall('./topic'):
    # iterate child elements of item
    for child in item:
        # special checking for namespace object content:media
        if child.tag == 'number':
            num = int(child.text)
        if child.tag == 'title':
            title = child.text
    location_id[num] = title
# Reading the sample input file
# Each line is: <location number> <metric name> <k>.
# NOTE(review): the handle is never closed — consider a ``with`` block.
file1 = open("C:\\MWDB Project\\Code\\Sample_Inputs_Loc.txt","r", encoding="utf8")
list_of_lines = file1.readlines()
for line in list_of_lines:
    words = line.split()
    loc = location_id[int(words[0])]
    input_type = words[1]
    # CSV file names use underscores where the input uses hyphens.
    input_type = input_type.replace("-", "_")
    k = int(words[2])
    # Reading the csv file into the dataframe
    df = pd.read_csv("C:\\MWDB Project\\Code\\CSV\\Task_3\\{}_Loc.csv".format(input_type))
    # Finding cosine similarity between the columns
    i = 0
    # NOTE(review): ``i`` above is unused.
    sim = list()
    for column in df:
        if column != "Annotations" and column != loc:
            # scipy's cosine() is a distance; 1 - distance is a similarity.
            sim.append([1 - cosine(df[loc], df[column]), column])
    # Keep the k most similar locations, highest similarity first.
    results = sorted(sim, reverse=True)[:k]
    final_list = list()
    for row in results:
        temp_list = list()
        for annot, data1, data2 in zip(df["Annotations"], df[loc], df[row[1]]):
            if data1 != 0 and data2 != 0:
                temp_list.append([annot, abs(data1 - data2)])
        # Three annotations with the smallest value difference.
        temp_list = sorted(temp_list, key = lambda x : x[1])[:3]
        final_list.append(temp_list)
    print("\n\nMost Similar Locations for Loc ID = ", words[0], ", Location = ", loc, ", k = ", k, " Metric = ", input_type)
    print("\nUSING COSINE SIMILARITY:")
    for j in range(0,k):
        # List concatenation: [similarity, location] + top-3 annotations.
        print(results[j] + final_list[j])
    print()
# The string literal below is a disabled variant using euclidean distance,
# kept verbatim for reference.
"""
# Finding euclidean similarity between the columns
sim_euc = list()
for column in df:
    if column != "Annotations" and column != loc:
        sim_euc.append([1 - euclidean(df[column], df[loc]), column])
results_euc = sorted(sim_euc, reverse = True)[:k]
final_list = list()
for row in results_euc:
    temp_list = list()
    for annot, data1, data2 in zip(df["Annotations"], df[loc], df[row[1]]):
        if data1 != 0 and data2 != 0:
            temp_list.append([annot, abs(data1 - data2)])
    temp_list = sorted(temp_list, key = lambda x : x[1])[:3]
    final_list.append(temp_list)
print("USING EUCLIDEAN SIMILARITY:")
for j in range(0,k):
    print(results_euc[j] + final_list[j])
"""
|
{"hexsha": "0249cc3ab41c3d7c20e088bbfbb9e8cb237aeab2", "size": 3122, "ext": "py", "lang": "Python", "max_stars_repo_path": "Code/Tasks/Task_3.py", "max_stars_repo_name": "ajayguna-96/Finding-Similarity-between-Vector-Models-of-images-using-Similarity-Scores", "max_stars_repo_head_hexsha": "c957024ed3be75e2e6bbe6ea790e24d04e58df3d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-04T13:40:34.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-04T13:40:34.000Z", "max_issues_repo_path": "Code/Tasks/Task_3.py", "max_issues_repo_name": "ajayguna-96/Finding-Similarity-between-Vector-Models-of-images-using-Similarity-Scores", "max_issues_repo_head_hexsha": "c957024ed3be75e2e6bbe6ea790e24d04e58df3d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Code/Tasks/Task_3.py", "max_forks_repo_name": "ajayguna-96/Finding-Similarity-between-Vector-Models-of-images-using-Similarity-Scores", "max_forks_repo_head_hexsha": "c957024ed3be75e2e6bbe6ea790e24d04e58df3d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0786516854, "max_line_length": 124, "alphanum_fraction": 0.6121076233, "include": true, "reason": "from scipy", "num_tokens": 801}
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the staff page processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from absl.testing import absltest
import numpy as np
from moonlight import engine
from moonlight import structure as structure_module
from moonlight.glyphs import testing as glyphs_testing
from moonlight.protobuf import musicscore_pb2
from moonlight.staves import staff_processor
from moonlight.staves import testing as staves_testing
class StaffProcessorTest(absltest.TestCase):
  """Tests StaffProcessor's x-coordinate rescaling of detected glyphs."""

  def testGetPage_x_scale(self):
    """Glyph x coordinates scale with the staffline extraction factor."""
    # Random staffline images matching the dimensions of PREDICTIONS.
    dummy_stafflines = np.random.random((2, 3, 5, 6))
    # NOTE(review): dummy_stafflines is unused below — presumably left over
    # from an earlier version of this test.
    classifier = glyphs_testing.DummyGlyphClassifier(glyphs_testing.PREDICTIONS)
    image = np.random.randint(0, 255, (30, 20), dtype=np.uint8)
    # Two fake staves with staffline distances of 5 and 20 respectively.
    staves = staves_testing.FakeStaves(
        image_t=image,
        staves_t=np.asarray([[[0, 10], [19, 10]], [[0, 20], [19, 20]]],
                            np.int32),
        staffline_distance_t=np.asarray([5, 20], np.int32),
        staffline_thickness_t=np.asarray(1, np.int32))
    structure = structure_module.create_structure(
        image, lambda unused_image: staves)
    class DummyStafflineExtractor(object):
      """A placeholder for StafflineExtractor.

      It only contains the constants necessary to scale the x coordinates.
      """
      staffline_distance_multiple = 2
      target_height = 10
    omr = engine.OMREngine(lambda _: classifier)
    page = omr.process_image(
        # Feed in a dummy image. It doesn't matter because FakeStaves has
        # hard-coded staff values.
        np.random.randint(0, 255, (100, 100)),
        process_structure=False)
    page = staff_processor.StaffProcessor(structure,
                                          DummyStafflineExtractor()).apply(page)
    self.assertEqual(len(page.system[0].staff), 2)
    # The first staff has a staffline distance of 5.
    # The extracted staffline slices have an original height of
    # staffline_distance * staffline_distance_multiple (10), which equals
    # target_height here, so there is no scaling.
    self.assertEqual(
        musicscore_pb2.Staff(glyph=page.system[0].staff[0].glyph),
        glyphs_testing.GLYPHS_PAGE.system[0].staff[0])
    # Glyphs in the second staff have a scaled x coordinate.
    self.assertEqual(
        len(page.system[0].staff[1].glyph),
        len(glyphs_testing.GLYPHS_PAGE.system[0].staff[1].glyph))
    for glyph in glyphs_testing.GLYPHS_PAGE.system[0].staff[1].glyph:
      expected_glyph = copy.deepcopy(glyph)
      # The second staff has a staffline distance of 20. The extracted staffline
      # slice would be 4 times the size of the scaled staffline, so x
      # coordinates are scaled by 4. Also, the glyphs may be in a different
      # order.
      expected_glyph.x *= 4
      self.assertIn(expected_glyph, page.system[0].staff[1].glyph)
|
{"hexsha": "7277394ffd0b908ebcb766a8095d4b73aa4fa763", "size": 3581, "ext": "py", "lang": "Python", "max_stars_repo_path": "moonlight/staves/staff_processor_test.py", "max_stars_repo_name": "lithomas1/moonlight", "max_stars_repo_head_hexsha": "cd22d6f47bbcf043a0027e91d342ae25dfc8a30a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "moonlight/staves/staff_processor_test.py", "max_issues_repo_name": "lithomas1/moonlight", "max_issues_repo_head_hexsha": "cd22d6f47bbcf043a0027e91d342ae25dfc8a30a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-25T16:15:42.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-25T16:15:42.000Z", "max_forks_repo_path": "moonlight/staves/staff_processor_test.py", "max_forks_repo_name": "lithomas1/moonlight", "max_forks_repo_head_hexsha": "cd22d6f47bbcf043a0027e91d342ae25dfc8a30a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.2359550562, "max_line_length": 80, "alphanum_fraction": 0.7143256074, "include": true, "reason": "import numpy", "num_tokens": 857}
|
import os
import unittest
import aspecd.exceptions
import numpy as np
import trepr.exceptions
import trepr.processing
import trepr.dataset
ROOTPATH = os.path.split(os.path.abspath(__file__))[0]
class TestPretriggerOffsetCompensation(unittest.TestCase):
    """Tests for the pretrigger offset compensation processing step."""

    def setUp(self):
        """Import a real speksim test dataset and create the processing step."""
        self.processing = trepr.processing.PretriggerOffsetCompensation()
        dataset_source = os.path.join(ROOTPATH, 'testdata/speksim/')
        factory = trepr.dataset.DatasetFactory()
        self.dataset = factory.get_dataset(source=dataset_source)

    def test_processing(self):
        """The step applies cleanly to an imported dataset."""
        self.dataset.process(self.processing)

    def test_zeropoint_index(self):
        """Determining the zero-point index yields a nonzero value."""
        self.processing.dataset = self.dataset
        self.processing._get_zeropoint_index()
        self.assertNotEqual(0, self.processing.parameters['zeropoint_index'])
class TestBackgroundCorrection(unittest.TestCase):
    """Tests for the BackgroundCorrection processing step."""
    def setUp(self):
        self.processing = trepr.processing.BackgroundCorrection()
        self.dataset = trepr.dataset.ExperimentalDataset()
    def create_dataset(self):
        # 500 profiles x 200 points; the middle profiles carry signal (+4)
        # on top of a constant background of 1.
        data = np.ones([500, 200])
        data[10:-10] += 4
        self.dataset.data.data = data
    def test_description(self):
        self.create_dataset()
        self.assertNotIn('abstract', self.processing.description.lower())
        self.assertIn('background', self.processing.description.lower())
    def test_1D_dataset_raises(self):
        # Background correction requires a 2D dataset.
        self.dataset.data.data = np.ones(200)
        with self.assertRaises(aspecd.exceptions.NotApplicableToDatasetError):
            self.dataset.process(self.processing)
    def test_too_small_dataset_raises(self):
        self.dataset.data.data = np.ones((5, 20))
        with self.assertRaises(aspecd.exceptions.NotApplicableToDatasetError):
            self.dataset.process(self.processing)
    def test_perform_task_with_defaults(self):
        self.create_dataset()
        self.dataset.process(self.processing)
        # Corrected signal rows drop below their uncorrected value of 5.
        self.assertGreater(5.0, self.dataset.data.data[16, 0])
    def test_perform_task_with_list_one_element(self):
        self.create_dataset()
        self.processing.parameters['num_profiles'] = [-10]
        self.dataset.process(self.processing)
        self.assertGreater(5.0, self.dataset.data.data[16, 0])
    def test_perform_task_with_list_two_elements(self):
        self.create_dataset()
        # Raise the trailing background so both ends need correcting.
        self.dataset.data.data[-10:] += 2
        self.processing.parameters['num_profiles'] = [10, -10]
        self.dataset.process(self.processing)
        self.assertGreater(5.0, self.dataset.data.data[16, 0])
        self.assertAlmostEqual(0, self.dataset.data.data[0, 0])
        self.assertAlmostEqual(0, self.dataset.data.data[-1, 0], 2)
    def test_perform_task_with_list_two_other_elements(self):
        self.create_dataset()
        self.dataset.data.data[-10:] += 2
        self.processing.parameters['num_profiles'] = [5, 10]
        self.dataset.process(self.processing)
        self.assertGreater(5.0, self.dataset.data.data[16, 0])
        self.assertAlmostEqual(0, self.dataset.data.data[0, 0])
        self.assertAlmostEqual(0, self.dataset.data.data[-1, 0], 2)
    def test_perform_task_with_list(self):
        # NOTE(review): exact duplicate of
        # test_perform_task_with_list_two_other_elements — consider removing
        # one of the two or varying the parameters.
        self.create_dataset()
        self.dataset.data.data[-10:] += 2
        self.processing.parameters['num_profiles'] = [5, 10]
        self.dataset.process(self.processing)
        self.assertGreater(5.0, self.dataset.data.data[16, 0])
        self.assertAlmostEqual(0, self.dataset.data.data[0, 0])
        self.assertAlmostEqual(0, self.dataset.data.data[-1, 0], 2)
class TestTriggerAutodetection(unittest.TestCase):
def setUp(self):
self.dataset = trepr.dataset.ExperimentalDataset()
self.processing = trepr.processing.TriggerAutodetection()
@staticmethod
def create_time_trace():
x1 = np.linspace(0, 10, 1000)
x2 = np.linspace(-2, 2, 200)
y1 = np.exp(-x1)
y1 = np.insert(y1, 0, np.zeros(100))
amplitude = 1
position = 0
width = 2
y2 = amplitude * np.exp(-(x2 - position) ** 2 / 2 * width ** 2)
convolved = np.convolve(y1, y2)
return convolved + np.random.random(len(convolved))
def test_instantiate_class(self):
pass
def test_has_appropriate_description(self):
self.assertIn('autodetect trigger position',
self.processing.description.lower())
def test_with_1D_dataset_without_time_axis_raises(self):
dataset = trepr.dataset.ExperimentalDataset()
with self.assertRaises(aspecd.exceptions.NotApplicableToDatasetError):
dataset.process(self.processing)
def test_with_2D_dataset_without_time_axis_raises(self):
dataset = trepr.dataset.ExperimentalDataset()
dataset.data.data = np.random.random([5, 5])
with self.assertRaises(aspecd.exceptions.NotApplicableToDatasetError):
dataset.process(self.processing)
def test_with_2D_dataset_with_time_axis(self):
dataset = trepr.dataset.ExperimentalDataset()
dataset.data.data = np.random.random([5, 100])
dataset.data.axes[1].quantity = 'time'
dataset.process(self.processing)
def test_with_1D_dataset_sets_trigger_position(self):
self.dataset.data.data = self.create_time_trace()
self.dataset.data.axes[0].quantity = 'time'
self.dataset.process(self.processing)
self.assertGreaterEqual(
self.dataset.metadata.transient.trigger_position, 100)
def test_with_2D_dataset_sets_trigger_position(self):
self.dataset.data.data = np.tile(self.create_time_trace(), (5, 1))
self.dataset.data.axes[1].quantity = 'time'
self.dataset.process(self.processing)
self.assertGreaterEqual(
self.dataset.metadata.transient.trigger_position, 100)
def test_with_1D_dataset_sets_time_axis(self):
self.dataset.data.data = self.create_time_trace()
self.dataset.data.axes[0].quantity = 'time'
self.dataset.process(self.processing)
self.assertLess(self.dataset.data.axes[0].values[0], 0)
def test_with_2D_dataset_sets_time_axis(self):
self.dataset.data.data = np.tile(self.create_time_trace(), (5, 1))
self.dataset.data.axes[1].quantity = 'time'
self.dataset.process(self.processing)
self.assertLess(self.dataset.data.axes[1].values[0], 0)
|
{"hexsha": "0ffd498e451d84afc9042e69ea316d38cda7ea11", "size": 6347, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_processing.py", "max_stars_repo_name": "tillbiskup/trepr", "max_stars_repo_head_hexsha": "4d24cb9ce5b89bfd3f9ee2016c8c493c4d76ea7f", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_processing.py", "max_issues_repo_name": "tillbiskup/trepr", "max_issues_repo_head_hexsha": "4d24cb9ce5b89bfd3f9ee2016c8c493c4d76ea7f", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_processing.py", "max_forks_repo_name": "tillbiskup/trepr", "max_forks_repo_head_hexsha": "4d24cb9ce5b89bfd3f9ee2016c8c493c4d76ea7f", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9386503067, "max_line_length": 78, "alphanum_fraction": 0.6837876162, "include": true, "reason": "import numpy", "num_tokens": 1432}
|
# -*- coding: utf-8 -*-
"""
Dataset for Mask R-CNN
Configurations and data loading code for COCO format.
@author: Mattia Brusamento
"""
import os
import sys
import time
import numpy as np
import json
# Download and install the Python coco tools from https://github.com/waleedka/coco
# That's a fork from the original https://github.com/pdollar/coco with a bug
# fix for Python 3.
# I submitted a pull request https://github.com/cocodataset/cocoapi/pull/50
# If the PR is merged then use the original repo.
# Note: Edit PythonAPI/Makefile and replace "python" with "python3".
from pycocotools.coco import COCO
from pycocotools import mask as maskUtils
from mrcnn import model as modellib, utils
############################################################
# Dataset
############################################################
class TrashDataset(utils.Dataset):
def load_trash(self, data_dir, anno_file):
print("Loading Trash Data:" + str(data_dir) + "" + str(anno_file))
trash = COCO(os.path.join(data_dir, anno_file))
# Add classes
class_ids = sorted(trash.getCatIds())
for i in class_ids:
self.add_class("trash", i, trash.loadCats(i)[0]["name"])
# Add images
image_ids = list(trash.imgs.keys())
for i in image_ids:
current_annotation = []
for a in trash.loadAnns(trash.getAnnIds()):
if a["image_id"] == i:
current_annotation = a
self.add_image(
"trash", image_id=i,
path=os.path.join(data_dir, trash.imgs[i]['file_name']),
width=trash.imgs[i]["width"],
height=trash.imgs[i]["height"],
annotations=current_annotation) # annotations=[a for a in trash.loadAnns(trash.getAnnIds()) if a['image_id'] == str(i)]
return trash
def load_mask(self, image_id):
"""Load instance masks for the given image.
Different datasets use different ways to store masks. This
function converts the different mask format to one format
in the form of a bitmap [height, width, instances].
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
image_info = self.image_info[image_id]
instance_masks = []
class_ids = []
annotation = image_info["annotations"]
if len(annotation) > 0:
# Build mask of shape [height, width, instance_count] and list
# of class IDs that correspond to each channel of the mask.
# for annotation in annotations:
class_id = self.map_source_class_id(
"trash.{}".format(annotation["category_id"]))
if class_id:
m = self.annToMask(annotation, image_info["height"],
image_info["width"])
# Some objects are so small that they're less than 1 pixel area
# and end up rounded out. Skip those objects.
if m.max() > 0:
# continue
# Is it a crowd? If so, use a negative class ID.
if annotation['iscrowd']:
# Use negative class ID for crowds
class_id *= -1
# For crowd masks, annToMask() sometimes returns a mask
# smaller than the given dimensions. If so, resize it.
if m.shape[0] != image_info["height"] or m.shape[1] != image_info["width"]:
m = np.ones([image_info["height"], image_info["width"]], dtype=bool)
instance_masks.append(m)
class_ids.append(class_id)
# Pack instance masks into an array
if len(class_ids) > 0:
mask = np.stack(instance_masks, axis=2).astype(np.bool)
class_ids = np.array(class_ids, dtype=np.int32)
return mask, class_ids
else:
# Call super class to return an empty mask
return super(TrashDataset, self).load_mask(image_id)
def image_reference(self, image_id):
"""Return a link to the image in the trash Website."""
info = self.image_info[image_id]
if info["source"] == "trash":
info['file_name']
else:
super(TrashDataset, self).image_reference(image_id)
######################################################################
#### The following two functions are from pycocotools with a few changes.
def annToRLE(self, ann, height, width):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
:return: binary mask (numpy 2D array)
"""
segm = ann['segmentation']
if isinstance(segm, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, height, width)
rle = maskUtils.merge(rles)
elif isinstance(segm['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, height, width)
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann, height, width):
    """Convert an annotation (polygons, uncompressed RLE, or RLE) into a
    binary mask.

    :return: binary mask (numpy 2D array)
    """
    # First normalize the segmentation to compressed RLE, then decode it.
    return maskUtils.decode(self.annToRLE(ann, height, width))
|
{"hexsha": "1c1c7b0585a19297352558b99758edbabe1b30bc", "size": 5703, "ext": "py", "lang": "Python", "max_stars_repo_path": "materials_trash_detector/trash_dataset.py", "max_stars_repo_name": "Waste-NANDO/Mask_RCNN", "max_stars_repo_head_hexsha": "272fbeba35e62ce3a6772c9c70e62da9fcb4a40e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "materials_trash_detector/trash_dataset.py", "max_issues_repo_name": "Waste-NANDO/Mask_RCNN", "max_issues_repo_head_hexsha": "272fbeba35e62ce3a6772c9c70e62da9fcb4a40e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "materials_trash_detector/trash_dataset.py", "max_forks_repo_name": "Waste-NANDO/Mask_RCNN", "max_forks_repo_head_hexsha": "272fbeba35e62ce3a6772c9c70e62da9fcb4a40e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-25T17:29:33.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-25T17:29:33.000Z", "avg_line_length": 38.5337837838, "max_line_length": 136, "alphanum_fraction": 0.565842539, "include": true, "reason": "import numpy", "num_tokens": 1247}
|
import nose
import unittest
import numpy as np
from pandas import Series, date_range
import pandas.util.testing as tm
from pandas.tseries.util import pivot_annual, isleapyear
class TestPivotAnnual(unittest.TestCase):
    """
    Tests for the pandas port of scikits.timeseries' pivot_annual.
    """
    def test_daily(self):
        # Five years of daily data, including one leap year (2000, 2004).
        rng = date_range('1/1/2000', '12/31/2004', freq='D')
        ts = Series(np.random.randn(len(rng)), index=rng)
        annual = pivot_annual(ts, 'D')
        doy = ts.index.dayofyear
        # In non-leap years, shift days on/after Mar 1 (doy >= 60) by one so
        # day-of-year labels line up with leap years.  Use bitwise NOT (~)
        # for boolean inversion: unary minus on a boolean array is
        # deprecated/removed in modern numpy.
        doy[(~isleapyear(ts.index.year)) & (doy >= 60)] += 1
        for i in range(1, 367):
            subset = ts[doy == i]
            subset.index = [x.year for x in subset.index]
            tm.assert_series_equal(annual[i].dropna(), subset)
        # check leap days (Feb 29) end up in their own column
        leaps = ts[(ts.index.month == 2) & (ts.index.day == 29)]
        day = leaps.index.dayofyear[0]
        leaps.index = leaps.index.year
        tm.assert_series_equal(annual[day].dropna(), leaps)
    def test_weekly(self):
        # TODO: not yet implemented
        pass
    def test_monthly(self):
        rng = date_range('1/1/2000', '12/31/2004', freq='M')
        ts = Series(np.random.randn(len(rng)), index=rng)
        annual = pivot_annual(ts, 'M')
        month = ts.index.month
        for i in range(1, 13):
            subset = ts[month == i]
            subset.index = [x.year for x in subset.index]
            tm.assert_series_equal(annual[i].dropna(), subset)
    def test_period_monthly(self):
        # TODO: not yet implemented
        pass
    def test_period_daily(self):
        # TODO: not yet implemented
        pass
    def test_period_weekly(self):
        # TODO: not yet implemented
        pass
# Allow running this test module directly under nose, dropping into pdb on
# errors/failures.
if __name__ == '__main__':
    nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
                   exit=False)
|
{"hexsha": "02a98858ed80852066036098cbeae6e252364d1f", "size": 1713, "ext": "py", "lang": "Python", "max_stars_repo_path": "pandas/tseries/tests/test_util.py", "max_stars_repo_name": "breisfeld/pandas", "max_stars_repo_head_hexsha": "f1fd50bb8e7603042fe93e01e862766673e33450", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pandas/tseries/tests/test_util.py", "max_issues_repo_name": "breisfeld/pandas", "max_issues_repo_head_hexsha": "f1fd50bb8e7603042fe93e01e862766673e33450", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pandas/tseries/tests/test_util.py", "max_forks_repo_name": "breisfeld/pandas", "max_forks_repo_head_hexsha": "f1fd50bb8e7603042fe93e01e862766673e33450", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-29T08:17:09.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-29T08:17:09.000Z", "avg_line_length": 26.3538461538, "max_line_length": 72, "alphanum_fraction": 0.5913601868, "include": true, "reason": "import numpy", "num_tokens": 453}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 17 15:11:05 2020
@author: mlampert
"""
import os
import copy
import pandas
import numpy as np
import pickle
import flap
import flap_nstx
# Module import side effect: locate the package-relative FLAP configuration
# file and register the NSTX data sources before any flap.get_data call.
thisdir = os.path.dirname(os.path.realpath(__file__))
fn = os.path.join(thisdir,'../flap_nstx.cfg')
flap.config.read(file_name=fn)
flap_nstx.register()
from flap_nstx.gpi import calculate_nstx_gpi_avg_frame_velocity, calculate_nstx_gpi_smooth_velocity
from flap_nstx.thomson import flap_nstx_thomson_data, get_nstx_thomson_gradient, get_fit_nstx_thomson_profiles
from matplotlib.backends.backend_pdf import PdfPages
def get_nstx_efit_energy_data(exp_id=None,
                              efit_tree='EFIT02'):
    """Fetch EFIT stored-energy signals for one NSTX shot from MDSplus.

    Reads WB (poloidal magnetic stored energy), WDIA (diamagnetic energy)
    and WMHD (total plasma energy) from the requested EFIT tree and returns
    them in a dictionary with keys 'Time', 'Poloidal', 'Diamagnetic' and
    'Total'.  Raises ValueError when exp_id is missing.
    """
    if exp_id is None:
        raise ValueError('exp_id (shotnumber) needs to be set for gathering the efit energy data! Returning...')
    # MDSplus node name -> key under which the signal is returned.
    signals = (('WB', 'Poloidal'),
               ('WDIA', 'Diamagnetic'),
               ('WMHD', 'Total'))
    data = {}
    for node, label in signals:
        signal = flap.get_data('NSTX_MDSPlus',
                               exp_id=exp_id,
                               name='\\' + efit_tree + '::\\' + node,
                               object_name=node)
        if 'Time' not in data:
            # The time base is taken from the first signal only.
            data['Time'] = signal.coordinate('Time')[0]
        data[label] = signal.data
    return data
def calculate_elm_properties_vs_energy_drop(elm_window=500e-6,
                                            elm_duration=100e-6,
                                            after_time_threshold=2e-3,
                                            averaging='before_after', #The type of averaging for the _avg results ['before_after', 'full', 'elm']
                                            recalc=False, #Recalculate the results and do not load from the pickle file
                                            plot=False, #Plot the results with matplotlib
                                            plot_error=False,
                                            pdf=False, #Save the results into a PDF
                                            normalized_structure=True,
                                            normalized_velocity=True,
                                            subtraction_order=1,
                                            dependence_error_threshold=0.5, #Line fitting error dependence relative error threshold. Results under this value are plotted into a text file.
                                            test=False,
                                            plot_linear_fit=True,
                                            plot_energy=False,
                                            plot_only_good=False,
                                            ):
    """Correlate GPI-measured ELM filament properties with EFIT stored-energy
    changes across each ELM.

    For every ELM in the (hard-coded) CSV database, this loads the
    pre-computed GPI velocity/structure results from pickle files, reads the
    EFIT stored-energy signals around the ELM time, and collects
    before-/after-ELM pairs.  The paired results are cached in two pickle
    files ('_before'/'_after'); unless `recalc` is set and both caches
    exist, they are simply reloaded.  Finally, each GPI quantity (temporal
    average and range maximum) is scatter-plotted against each energy
    quantity with a linear fit; fits whose relative parameter errors fall
    below `dependence_error_threshold` are also written to a text file.

    NOTE(review): `plot_error` is accepted but never used in this body.
    """
    if averaging not in ['before_after', 'full', 'elm']:
        raise ValueError('Averaging should be one of the following: before_after, full, elm')
    flap.delete_data_object('*')
    wd=flap.config.get_all_section('Module NSTX_GPI')['Working directory']
    # The cache/plot file names encode every option that changes the result.
    result_filename=wd+'/processed_data/'+'elm_energy_dependence_'+averaging+'_avg'
    if normalized_structure:
        result_filename+='_ns'
    if normalized_velocity:
        result_filename+='_nv'
    result_filename+='_so'+str(subtraction_order)
    scaling_db_file_before=result_filename+'_before.pickle'
    scaling_db_file_after=result_filename+'_after.pickle'
    if test:
        import matplotlib.pyplot as plt
        plt.figure()
    if plot_energy:
        import matplotlib.pyplot as plt
        plt.figure()
        energy_pdf=PdfPages(wd+'/plots/all_energy.pdf')
    if not os.path.exists(scaling_db_file_before) or not os.path.exists(scaling_db_file_after) or recalc:
        #Load and process the ELM database
        database_file='/Users/mlampert/work/NSTX_workspace/db/ELM_findings_mlampert_velocity_good.csv'
        db=pandas.read_csv(database_file, index_col=0)
        elm_index=list(db.index)
        #Defining the variables for the calculation
        # Pass 0 collects the "before ELM" samples, pass 1 the "after ELM" ones.
        for ind_before_after in range(2):
            energy_results={'Total':[],
                            'Diamagnetic':[],
                            'Poloidal':[]}
            gpi_results_avg={'Velocity ccf':[],
                             'Velocity str avg':[],
                             'Velocity str max':[],
                             'Size avg':[],
                             'Size max':[],
                             'Area avg':[],
                             'Area max':[],
                             'Elongation avg':[],
                             'Elongation max':[],
                             'Angle avg':[],
                             'Angle max':[],
                             'Str number':[],
                             }
            gpi_results_max=copy.deepcopy(gpi_results_avg)
            if ind_before_after == 0:
                scaling_db_file = scaling_db_file_before
            else:
                scaling_db_file = scaling_db_file_after
            for index_elm in range(len(elm_index)):
                # Database stores the ELM time in ms; convert to seconds.
                elm_time=db.loc[elm_index[index_elm]]['ELM time']/1000.
                shot=int(db.loc[elm_index[index_elm]]['Shot'])
                if normalized_velocity:
                    if normalized_structure:
                        str_add='_ns'
                    else:
                        str_add=''
                    filename=flap_nstx.analysis.filename(exp_id=shot,
                                                         working_directory=wd+'/processed_data',
                                                         time_range=[elm_time-2e-3,elm_time+2e-3],
                                                         comment='ccf_velocity_pfit_o'+str(subtraction_order)+'_ct_0.6_fst_0.0'+str_add+'_nv',
                                                         extension='pickle')
                else:
                    filename=wd+'/processed_data/'+db.loc[elm_index[index_elm]]['Filename']+'.pickle'
                #grad.slice_data(slicing=time_slicing)
                status=db.loc[elm_index[index_elm]]['OK/NOT OK']
                if status != 'NO':
                    velocity_results=pickle.load(open(filename, 'rb'))
                    # Zero out NaNs so averaging/argmax below are well defined.
                    for key in gpi_results_avg:
                        ind_nan=np.isnan(velocity_results[key])
                        velocity_results[key][ind_nan]=0.
                    time=velocity_results['Time']
                    elm_time_interval_ind=np.where(np.logical_and(time >= elm_time-elm_duration,
                                                                  time <= elm_time+elm_duration))
                    # Window sizes in samples; 2.5e-6 s is presumably the GPI
                    # frame time -- TODO confirm against the acquisition setup.
                    nwin=int(elm_window/2.5e-6)
                    n_elm=int(elm_duration/2.5e-6)
                    # Refine the ELM time to the frame-similarity minimum
                    # inside the +- elm_duration interval.
                    elm_time=(time[elm_time_interval_ind])[np.argmin(velocity_results['Frame similarity'][elm_time_interval_ind])]
                    elm_time_ind=int(np.argmin(np.abs(time-elm_time)))
                    energy_data=get_nstx_efit_energy_data(exp_id=shot)
                    if plot_energy:
                        plt.cla()
                        for key in ['Poloidal', 'Diamagnetic', 'Total']:
                            plt.plot(energy_data['Time'], energy_data[key], label=key)
                        plt.plot([elm_time,elm_time],[0,1e6])
                        plt.title('Energy vs time for ' + str(shot)+' @ '+f"{elm_time:.6f}")
                        plt.legend(loc='upper right', shadow=True)
                        plt.xlabel('Time')
                        plt.ylabel('Energy [J]')
                        plt.xlim(elm_time-50e-3,elm_time+50e-3)
                        energy_pdf.savefig()
                    # Pick the EFIT time point just before (pass 0) or just
                    # after (pass 1) the ELM.
                    index_time=np.argmin(np.abs(energy_data['Time']-elm_time))
                    if energy_data['Time'][index_time] <= elm_time and ind_before_after == 1:
                        index_time = index_time + 1
                    if energy_data['Time'][index_time] > elm_time and ind_before_after == 0:
                        index_time = index_time - 1
                    energy_status='OK'
                    # Reject the ELM when the relevant EFIT time point is too
                    # far (> after_time_threshold) from the ELM time.
                    if (ind_before_after == 0 and (energy_data['Time'][index_time+1]-elm_time > after_time_threshold) or
                        ind_before_after == 1 and (energy_data['Time'][index_time]-elm_time > after_time_threshold)
                        ):
                        energy_status='NO'
                    if (energy_status != 'NO'):
                        for keys in energy_results.keys():
                            print(keys)
                            energy_results[keys].append(energy_data[keys][index_time])
                        for key in gpi_results_avg:
                            if averaging == 'before_after':
                                #This separates the before and after times with the before and after Thomson results.
                                if ind_before_after == 0:
                                    gpi_results_avg[key].append(np.mean(velocity_results[key][elm_time_ind-nwin:elm_time_ind],axis=0))
                                elif ind_before_after == 1:
                                    gpi_results_avg[key].append(np.mean(velocity_results[key][elm_time_ind+n_elm:elm_time_ind+nwin],axis=0))
                            elif averaging == 'full':
                                gpi_results_avg[key].append(np.mean(velocity_results[key][elm_time_ind+n_elm:elm_time_ind+nwin],axis=0))
                            elif averaging == 'elm':
                                gpi_results_avg[key].append(velocity_results[key][elm_time_ind])
                            # Range maximum: largest |value| in the full
                            # +- nwin window (componentwise for 2D signals).
                            if len(velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin].shape) == 2:
                                max_ind_0=np.argmax(np.abs(velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,0]),axis=0)
                                max_ind_1=np.argmax(np.abs(velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,1]),axis=0)
                                gpi_results_max[key].append([velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,0][max_ind_0],
                                                             velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin,1][max_ind_1]])
                            else:
                                max_ind=np.argmax(np.abs(velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin]),axis=0)
                                gpi_results_max[key].append(velocity_results[key][elm_time_ind-nwin:elm_time_ind+nwin][max_ind])
            # Convert the per-ELM lists into numpy arrays and cache them.
            for variable in [energy_results, gpi_results_avg, gpi_results_max]:
                for key in variable:
                    variable[key]=np.asarray(variable[key])
            pickle.dump((energy_results, gpi_results_avg, gpi_results_max), open(scaling_db_file,'wb'))
            if ind_before_after == 0:
                energy_results_before=copy.deepcopy(energy_results)
                gpi_results_avg_before=copy.deepcopy(gpi_results_avg)
                gpi_results_max_before=copy.deepcopy(gpi_results_max)
            if ind_before_after == 1:
                energy_results_after=copy.deepcopy(energy_results)
                gpi_results_avg_after=copy.deepcopy(gpi_results_avg)
                gpi_results_max_after=copy.deepcopy(gpi_results_max)
    else:
        energy_results_before, gpi_results_avg_before, gpi_results_max_before = pickle.load(open(scaling_db_file_before,'rb'))
        energy_results_after, gpi_results_avg_after, gpi_results_max_after = pickle.load(open(scaling_db_file_after,'rb'))
    if plot_energy:
        energy_pdf.close()
    # Select the matplotlib backend depending on whether interactive plots
    # are requested.
    if plot:
        import matplotlib
        matplotlib.use('QT5Agg')
        import matplotlib.pyplot as plt
    else:
        import matplotlib
        matplotlib.use('agg')
        import matplotlib.pyplot as plt
    from numpy import logical_and as AND
    # Keep only ELMs where all three energy signals are non-zero.
    non_zero_ind_before=AND(energy_results_before['Poloidal'] != 0.,
                            AND(energy_results_before['Diamagnetic'] != 0.,
                                energy_results_before['Total'] != 0.))
    non_zero_ind_after=AND(energy_results_after['Poloidal'] != 0.,
                           AND(energy_results_after['Diamagnetic'] != 0.,
                               energy_results_after['Total'] != 0.))
    # Absolute energy change across the ELM (same values stored under both
    # the 'before' and 'after' dictionaries).
    energy_results_before['Poloidal change']=energy_results_after['Poloidal']-energy_results_before['Poloidal']
    energy_results_after['Poloidal change']=energy_results_after['Poloidal']-energy_results_before['Poloidal']
    energy_results_before['Diamagnetic change']=energy_results_after['Diamagnetic']-energy_results_before['Diamagnetic']
    energy_results_after['Diamagnetic change']=energy_results_after['Diamagnetic']-energy_results_before['Diamagnetic']
    energy_results_before['Total change']=energy_results_after['Total']-energy_results_before['Total']
    energy_results_after['Total change']=energy_results_after['Total']-energy_results_before['Total']
    # NOTE(review): this first set of 'relative change' values (after/before
    # ratio minus one) is immediately overwritten by the second set below
    # (difference normalized by the total stored energy) -- confirm which
    # definition is intended.
    energy_results_before['Poloidal relative change']=energy_results_after['Poloidal']/energy_results_before['Poloidal']-1
    energy_results_after['Poloidal relative change']=energy_results_after['Poloidal']/energy_results_before['Poloidal']-1
    energy_results_before['Diamagnetic relative change']=energy_results_after['Diamagnetic']/energy_results_before['Diamagnetic']-1
    energy_results_after['Diamagnetic relative change']=energy_results_after['Diamagnetic']/energy_results_before['Diamagnetic']-1
    energy_results_before['Total relative change']=energy_results_after['Total']/energy_results_before['Total']-1
    energy_results_after['Total relative change']=energy_results_after['Total']/energy_results_before['Total']-1
    energy_results_before['Poloidal relative change']=(energy_results_after['Poloidal']-energy_results_before['Poloidal'])/energy_results_before['Total']
    energy_results_after['Poloidal relative change']=(energy_results_after['Poloidal']-energy_results_before['Poloidal'])/energy_results_before['Total']
    energy_results_before['Diamagnetic relative change']=(energy_results_after['Diamagnetic']-energy_results_before['Diamagnetic'])/energy_results_before['Total']
    energy_results_after['Diamagnetic relative change']=(energy_results_after['Diamagnetic']-energy_results_before['Diamagnetic'])/energy_results_before['Total']
    energy_results_before['Total relative change']=(energy_results_after['Total']-energy_results_before['Total'])/energy_results_before['Total']
    energy_results_after['Total relative change']=(energy_results_after['Total']-energy_results_before['Total'])/energy_results_before['Total']
    del AND
    y_variables_before=[gpi_results_avg_before,gpi_results_max_before]
    y_variables_after=[gpi_results_avg_after,gpi_results_max_after]
    title_addon=['(temporal avg)','(range max)']
    radvert=['radial', 'vertical']
    # One pass for the temporal-average results, one for the range-maximum
    # results.
    for var_ind in range(len(y_variables_after)):
        if pdf:
            filename=result_filename.replace('processed_data', 'plots')+'_'+title_addon[var_ind].replace('(','').replace(')','').replace(' ','_')
            pdf_pages=PdfPages(filename+'.pdf')
            file=open(filename+'_linear_dependence.txt', 'wt')
        for key_gpi in y_variables_after[var_ind].keys():
            for key_grad in energy_results_after.keys():
                # i selects the radial (0) / vertical (1) component when the
                # GPI quantity is two-component.
                for i in range(2):
                    if len(y_variables_after[var_ind][key_gpi].shape) == 2:
                        fig, ax = plt.subplots()
                        y_var_before=y_variables_before[var_ind][key_gpi][:,i][non_zero_ind_before]
                        non_zero_ind_y_before=np.where(y_var_before != 0.)
                        y_var_after=y_variables_after[var_ind][key_gpi][:,i][non_zero_ind_after]
                        non_zero_ind_y_after=np.where(y_var_after != 0.)
                        gpi_key_str_addon=' '+radvert[i]
                    elif len(y_variables_after[var_ind][key_gpi].shape) == 1:
                        # Scalar quantity: only one component; skip i == 1.
                        if i == 1:
                            continue
                        fig, ax = plt.subplots()
                        y_var_before=y_variables_before[var_ind][key_gpi][non_zero_ind_before]
                        non_zero_ind_y_before=np.where(y_var_before != 0.)
                        y_var_after=y_variables_after[var_ind][key_gpi][non_zero_ind_after]
                        non_zero_ind_y_after=np.where(y_var_after != 0.)
                        gpi_key_str_addon=' '
                    for before_after_str in ['Before', 'After']:
                        if before_after_str == 'Before':
                            y_var=y_var_before
                            non_zero_ind_y=non_zero_ind_y_before
                            non_zero_ind=non_zero_ind_before
                            energy_results=energy_results_before
                            color='tab:blue'
                            fit_color='blue'
                        if before_after_str == 'After':
                            y_var=y_var_after
                            non_zero_ind_y=non_zero_ind_y_after
                            non_zero_ind=non_zero_ind_after
                            energy_results=energy_results_after
                            color='red'
                            fit_color='black'
                        ind_nan=np.logical_not(np.isnan(energy_results[key_grad][non_zero_ind][non_zero_ind_y]))
                        # Linear fit with covariance; fall back to NaNs when
                        # the fit is impossible (e.g. too few points).
                        try:
                            val,cov=np.polyfit(energy_results[key_grad][non_zero_ind][non_zero_ind_y][ind_nan],
                                               y_var[non_zero_ind_y][ind_nan],
                                               1,
                                               cov=True)
                        except:
                            val=[np.nan, np.nan]
                            cov=np.asarray([[np.nan,np.nan],
                                            [np.nan,np.nan]])
                        a=val[0]
                        b=val[1]
                        delta_a=np.sqrt(cov[0,0])
                        delta_b=np.sqrt(cov[1,1])
                        good_plot=True
                        # A fit counts as "good" when both fitted parameters
                        # have relative errors below the threshold.
                        if (np.abs(delta_a/a) < dependence_error_threshold and
                            np.abs(delta_b/b) < dependence_error_threshold):
                            file.write(before_after_str+' result:\n')
                            file.write(key_gpi+' '+gpi_key_str_addon+' = '+f"{b:.4f}"+' +- '+f"{delta_b:.4f}"+' + ('+f"{a:.4f}"+'+-'+f"{delta_a:.4f}"+') * '+key_grad+' energy_results'+'\n')
                            file.write('Relative error: delta_b/b: '+f"{np.abs(delta_b/b*100):.6f}"+'% , delta_a/a: '+f"{np.abs(delta_a/a*100):.6f}"+'%\n\n')
                        elif plot_only_good:
                            good_plot=False
                        if good_plot:
                            ax.scatter(energy_results[key_grad][non_zero_ind][non_zero_ind_y],
                                       y_var[non_zero_ind_y],
                                       marker='o',
                                       color=color)
                            if plot_linear_fit:
                                ax.plot(energy_results[key_grad][non_zero_ind][non_zero_ind_y][ind_nan],
                                        a*energy_results[key_grad][non_zero_ind][non_zero_ind_y][ind_nan]+b,
                                        color=fit_color)
                                ind_sorted=np.argsort(energy_results[key_grad][non_zero_ind][non_zero_ind_y][ind_nan])
                                # Shade the fit's uncertainty band (both
                                # parameter-error combinations).
                                ax.fill_between(energy_results[key_grad][non_zero_ind][non_zero_ind_y][ind_nan][ind_sorted],
                                                (a-delta_a)*energy_results[key_grad][non_zero_ind][non_zero_ind_y][ind_nan][ind_sorted]+(b-delta_b),
                                                (a+delta_a)*energy_results[key_grad][non_zero_ind][non_zero_ind_y][ind_nan][ind_sorted]+(b+delta_b),
                                                color=color,
                                                alpha=0.3)
                                ax.fill_between(energy_results[key_grad][non_zero_ind][non_zero_ind_y][ind_nan][ind_sorted],
                                                (a-delta_a)*energy_results[key_grad][non_zero_ind][non_zero_ind_y][ind_nan][ind_sorted]+(b+delta_b),
                                                (a+delta_a)*energy_results[key_grad][non_zero_ind][non_zero_ind_y][ind_nan][ind_sorted]+(b-delta_b),
                                                color=color,
                                                alpha=0.3)
                    if key_grad == 'Pressure':
                        dimension='[kPa/m]'
                    elif key_grad == 'Temperature':
                        dimension='[keV/m]'
                    elif key_grad == 'Density':
                        dimension='[1/m3/m]'
                    else:
                        dimension=''
                    if 'Velocity' in key_gpi:
                        dimension_gpi='[m/s]'
                    if 'Size' in key_gpi:
                        dimension_gpi='[m]'
                    ax.set_xlabel(key_grad+' energy results '+dimension)
                    ax.set_ylabel(key_gpi+' '+title_addon[var_ind]+' '+dimension_gpi)
                    ax.set_title(key_grad+' energy results'+' vs. '+key_gpi+gpi_key_str_addon+' '+title_addon[var_ind])
                    fig.tight_layout()
                    if pdf:
                        pdf_pages.savefig()
        if pdf:
            pdf_pages.close()
        if not plot:
            plt.close('all')
        # NOTE(review): 'file' is only opened when pdf=True -- confirm this
        # close is never reached with pdf=False.
        file.close()
|
{"hexsha": "f3fc8d525f1c173731fc48ad09ca613915c4089c", "size": 22881, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/plot_elm_properties_vs_energy_drop.py", "max_stars_repo_name": "fusion-flap/flap_nstx_gpi", "max_stars_repo_head_hexsha": "cf7d4bdecea8fd7434f8f7eb64e1a7b13fc0f759", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis/plot_elm_properties_vs_energy_drop.py", "max_issues_repo_name": "fusion-flap/flap_nstx_gpi", "max_issues_repo_head_hexsha": "cf7d4bdecea8fd7434f8f7eb64e1a7b13fc0f759", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-10-03T22:25:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-06T10:31:11.000Z", "max_forks_repo_path": "analysis/plot_elm_properties_vs_energy_drop.py", "max_forks_repo_name": "fusion-flap/flap_nstx_gpi", "max_forks_repo_head_hexsha": "cf7d4bdecea8fd7434f8f7eb64e1a7b13fc0f759", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.9179104478, "max_line_length": 195, "alphanum_fraction": 0.5276430226, "include": true, "reason": "import numpy,from numpy", "num_tokens": 4497}
|
#!/usr/bin/env python
from typing import Tuple, Union

import numpy
from numpy.random import RandomState
from sklearn.datasets import make_friedman1
from sklearn.model_selection import train_test_split

from backprop.network import Network
# Default sizes and seed for the demo problem below.
_demo_problem_num_train_samples: int = 1000
_demo_problem_num_test_samples: int = 100
_demo_num_uninformative_columns: int = 0
_random_state = 0
def demo_backprop(
        num_train_samples: int = _demo_problem_num_train_samples,
        num_test_samples: int = _demo_problem_num_test_samples,
        num_uninformative_columns: int = _demo_num_uninformative_columns,
        random_state: Union[int, None, RandomState] =_random_state
):
    """Train a small feed-forward network on the Friedman #1 regression
    problem and print the mean squared error on a held-out test set."""
    # Normalize the seed argument into a RandomState instance.
    if not isinstance(random_state, RandomState):
        random_state = RandomState(random_state)
    # Build the train/test data sets for the demo.
    inputs_train, inputs_test, outputs_train, outputs_test = make_test_problem(
        n_train_samples=num_train_samples, n_test_samples=num_test_samples,
        n_uninformative=num_uninformative_columns, random_state=random_state
    )
    # Size the network from the data: one hidden layer with
    # 2 * inputs * outputs nodes.
    n_in = inputs_train.shape[1]
    n_out = outputs_train.shape[1]
    n_hidden = 2 * n_in * n_out
    network = Network(n_in, n_hidden, n_out,
                      random_state=random_state)
    # For two hidden layers one could instead do:
    # network = Network(n_in, n_hidden, n_hidden, n_out)
    # Train on the training set, then score on the held-out test set.
    network.train_online(inputs=inputs_train, correct_outputs=outputs_train)
    predictions = network.predict(inputs_test)
    err = ((predictions - outputs_test)**2).sum(axis=1).mean(axis=0)
    print('Cross-validated error: %.3g' % err)
def make_test_problem(
        n_train_samples: int = _demo_problem_num_train_samples,
        n_test_samples: int = _demo_problem_num_test_samples,
        n_uninformative: int = 0,
        random_state: Union[int, None, RandomState] = _random_state
) -> Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray]:
    """Generate a train/test split of the Friedman #1 regression problem.

    Args:
        n_train_samples: number of training rows.
        n_test_samples: number of test rows.
        n_uninformative: extra noise feature columns appended to the five
            informative Friedman features; must be non-negative.
        random_state: seed, RandomState, or None, forwarded to scikit-learn.

    Returns:
        (inputs_train, inputs_test, outputs_train, outputs_test), each a
        2-D numpy array (1-D outputs are reshaped to a single column).

    Raises:
        ValueError: if n_uninformative is negative.
    """
    n_samples = n_train_samples + n_test_samples
    # Raise instead of assert: asserts are stripped under "python -O".
    if n_uninformative < 0:
        raise ValueError(
            'n_uninformative must be non-negative, got %d' % n_uninformative)
    n_features = 5 + n_uninformative
    inputs, outputs = make_friedman1(
        n_samples=n_samples, n_features=n_features, random_state=random_state
    )
    # Normalize both arrays to 2-D so callers can rely on a column axis.
    if inputs.ndim == 1:
        inputs = numpy.reshape(inputs, inputs.shape + (1,))
    if outputs.ndim == 1:
        outputs = numpy.reshape(outputs, outputs.shape + (1,))
    inputs_train, inputs_test, outputs_train, outputs_test = train_test_split(
        inputs, outputs,
        train_size=n_train_samples, test_size=n_test_samples,
        random_state=random_state
    )
    return inputs_train, inputs_test, outputs_train, outputs_test
# Run the demo when executed as a script.
if __name__ == "__main__":
    demo_backprop()
|
{"hexsha": "a1cb9d20eceefda48810b2d69f5413ec1438b335", "size": 2907, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/backprop/backprop_demo.py", "max_stars_repo_name": "TedBrookings/backprop", "max_stars_repo_head_hexsha": "36c09b43e3c81f8506e806ede1435a0144d2f792", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/backprop/backprop_demo.py", "max_issues_repo_name": "TedBrookings/backprop", "max_issues_repo_head_hexsha": "36c09b43e3c81f8506e806ede1435a0144d2f792", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/backprop/backprop_demo.py", "max_forks_repo_name": "TedBrookings/backprop", "max_forks_repo_head_hexsha": "36c09b43e3c81f8506e806ede1435a0144d2f792", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7532467532, "max_line_length": 79, "alphanum_fraction": 0.7368421053, "include": true, "reason": "import numpy,from numpy", "num_tokens": 673}
|
{-# OPTIONS --without-K #-}
module PiG where
import Level as L
open import Data.Empty
open import Data.Unit
open import Data.Sum
open import Data.Product
open import Data.Nat
open import Function
open import Relation.Binary.PropositionalEquality
open import Relation.Binary
------------------------------------------------------------------------------
-- Reasoning about paths
-- Path induction (the J eliminator): to prove C for an arbitrary path it
-- suffices to prove it for refl.
pathInd : ∀ {u u'} → {A : Set u} →
          (C : (x y : A) → (x ≡ y) → Set u') →
          (c : (x : A) → C x x refl) →
          ((x y : A) (p : x ≡ y) → C x y p)
pathInd C c x .x refl = c x
-- Transitivity of paths, built from pathInd rather than direct pattern
-- matching.
trans' : {A : Set} → {x y z : A} → (x ≡ y) → (y ≡ z) → (x ≡ z)
trans' {A} {x} {y} {z} p q =
  pathInd
    (λ x y p → ((z : A) → (q : y ≡ z) → (x ≡ z)))
    (pathInd (λ x z q → x ≡ z) (λ _ → refl))
    x y p z q

-- Action of a function on paths (congruence / cong).
ap : {A B : Set} {x y : A} → (f : A → B) → (x ≡ y) → (f x ≡ f y)
ap {A} {B} {x} {y} f p =
  pathInd
    (λ x y p → f x ≡ f y)
    (λ x → refl)
    x y p

-- Componentwise action on pairs: two paths give a path between pairs.
ap× : {A B : Set} {x y : A} {z w : B} → (x ≡ y) → (z ≡ w) →
      ((x , z) ≡ (y , w))
ap× {A} {B} {x} {y} {z} {w} p₁ p₂ =
  pathInd
    (λ x y p₁ → ((z : B) (w : B) (p₂ : z ≡ w) → ((x , z) ≡ (y , w))))
    (λ x → pathInd
             (λ z w p₂ → ((x , z) ≡ (x , w)))
             (λ z → refl))
    x y p₁ z w p₂

-- Transport a proof of P along a path.
transport : {A : Set} {x y : A} {P : A → Set} → (p : x ≡ y) → P x → P y
transport {A} {x} {y} {P} p =
  pathInd
    (λ x y p → (P x → P y))
    (λ x → id)
    x y p
------------------------------------------------------------------------------
-- Codes for our types
-- Codes for the types of the language; HALF is the experimental
-- fractional type.
data B : Set where
  -- no zero because we map types to POINTED SETS
  ONE : B
  PLUS : B → B → B
  TIMES : B → B → B
  HALF : B -- used to explore fractionals
{--
Now let's define groups/groupoids
The type 1/2 is modeled by the group { refl , loop }
where loop is self-inverse, i.e., loop . loop = refl
We always get refl, inverses, and associativity for free. We need to
explicitly add the new element 'loop' and the new equation
'loop . loop = refl'
--}
-- Equality of two path-endomorphisms f and g at a particular loop a.
cut-path : {A : Set} → {x : A} → (f g : ( x ≡ x) → (x ≡ x)) → x ≡ x → Set
cut-path f g a = f a ≡ g a

-- A pointed set with a distinguished loop on the base point, plus a
-- truncation equation (path-rel₁ path ≡ path-rel₂ path) relating two
-- chosen operations on loops.
record GroupoidVal : Set₁ where
  constructor •[_,_,_,_,_,_]
  field
    ∣_∣ : Set
    • : ∣_∣
    path : • ≡ • -- the additional path (loop)
    -- truncate : trans' path path ≡ refl -- the equivalence used to truncate
    -- truncate : (f : • ≡ • → • ≡ •) → (b : • ≡ •) → f b ≡ refl
    path-rel₁ : • ≡ • → • ≡ •
    path-rel₂ : • ≡ • → • ≡ •
    truncate : cut-path path-rel₁ path-rel₂ path

open GroupoidVal public
-- Example: the type 1/2 modeled over the unit type, with a postulated
-- self-inverse loop (iloop with trans' iloop iloop ≡ refl).
const-loop : {A : Set} {x : A} → (y : x ≡ x) → x ≡ x
const-loop _ = refl

postulate iloop : tt ≡ tt
          itruncate : (f : tt ≡ tt → tt ≡ tt) → cut-path f const-loop iloop
          -- (trans' iloop iloop) ≡ refl

-- Doubling a loop by composing it with itself.
2loop : {A : Set} → {x : A} → x ≡ x → x ≡ x
2loop x = trans' x x

half : GroupoidVal
half = •[ ⊤ , tt , iloop , 2loop , const-loop , itruncate 2loop ]
-- Interpretations of types: each code denotes a GroupoidVal; sums and
-- products are interpreted pointwise on carriers, base points and loops.
⟦_⟧ : B → GroupoidVal
⟦ ONE ⟧ = •[ ⊤ , tt , refl , id , id , refl ]
⟦ PLUS b₁ b₂ ⟧ with ⟦ b₁ ⟧ | ⟦ b₂ ⟧
... | •[ B₁ , x₁ , p₁ , f₁ , g₁ , t₁ ] | •[ B₂ , x₂ , p₂ , f₂ , g₂ , t₂ ] =
  •[ B₁ ⊎ B₂ , inj₁ x₁ , ap inj₁ p₁ , id , id , refl ]
⟦ TIMES b₁ b₂ ⟧ with ⟦ b₁ ⟧ | ⟦ b₂ ⟧
... | •[ B₁ , x₁ , p₁ , f₁ , g₁ , t₁ ] | •[ B₂ , x₂ , p₂ , f₂ , g₂ , t₂ ] =
  •[ B₁ × B₂ , (x₁ , x₂) , ap× p₁ p₂ , id , id , refl ]
⟦ HALF ⟧ = half
-- Combinators (only swap× so far)
data _⟷_ : B → B → Set₁ where
  swap× : {b₁ b₂ : B} → TIMES b₁ b₂ ⟷ TIMES b₂ b₁

-- A morphism of GroupoidVals: a map of carriers preserving the base
-- point and acting on paths.
record GFunctor (G₁ G₂ : GroupoidVal) : Set where
  field
    fun : ∣ G₁ ∣ → ∣ G₂ ∣
    baseP : fun (• G₁) ≡ (• G₂)
    -- dependent paths??
    isoP : {x y : ∣ G₁ ∣} (p : x ≡ y) → (fun x ≡ fun y)

-- Each combinator is interpreted as a functor between the interpreted
-- groupoids; swap× is the product swap together with its action on paths.
eval : {b₁ b₂ : B} → (b₁ ⟷ b₂) → GFunctor ⟦ b₁ ⟧ ⟦ b₂ ⟧
eval swap× =
  record {
    fun = swap;
    baseP = refl;
    isoP = λ p → ap swap p
  }
------------------------------------------------------------------------------
{-
Some old scribblings
A-2 : Set
A-2 = ⊤
postulate
S⁰ : Set
S¹ : Set
record Trunc-1 (A : Set) : Set where
field
embed : A → Trunc-1 A
hub : (r : S⁰ → Trunc-1 A) → Trunc-1 A
spoke : (r : S⁰ → Trunc-1 A) → (x : S⁰) → r x ≡ hub r
-}
--------------------
-- An explicit 2-groupoid presentation: objects, 1-paths (hom) and
-- 2-paths (equations between 1-paths).
record 2Groupoid : Set₁ where
  constructor ²[_,_,_]
  field
    obj : Set
    hom : Rel obj L.zero -- paths (loop)
    2path : ∀ {a b} → hom a b → hom a b → Set -- 2paths, i.e. path eqns

open 2Groupoid public

hom⊤ : Rel ⊤ L.zero
hom⊤ = _≡_

2path⊤ : Rel (hom⊤ tt tt) L.zero
2path⊤ = _≡_

-- The type 1/2 as a 2-groupoid over the unit type.
half' : 2Groupoid
half' = ²[ ⊤ , hom⊤ , 2path⊤ ]
|
{"hexsha": "7e6281c0965cd8b17efa592abbe1101351f9a784", "size": 4722, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "PiG.agda", "max_stars_repo_name": "JacquesCarette/pi-dual", "max_stars_repo_head_hexsha": "003835484facfde0b770bc2b3d781b42b76184c1", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2015-08-18T21:40:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-05T01:07:57.000Z", "max_issues_repo_path": "PiG.agda", "max_issues_repo_name": "JacquesCarette/pi-dual", "max_issues_repo_head_hexsha": "003835484facfde0b770bc2b3d781b42b76184c1", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2018-06-07T16:27:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-29T20:41:23.000Z", "max_forks_repo_path": "PiG.agda", "max_forks_repo_name": "JacquesCarette/pi-dual", "max_forks_repo_head_hexsha": "003835484facfde0b770bc2b3d781b42b76184c1", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2016-05-29T01:56:33.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-10T09:47:13.000Z", "avg_line_length": 26.2333333333, "max_line_length": 79, "alphanum_fraction": 0.4434561626, "num_tokens": 1864}
|
using PowerModelsReliability
using PowerModels
using InfrastructureModels
using Ipopt
using Mosek
using Juniper
using Cbc
using CPLEX
using Gurobi
using JuMP
using SCS
# Pre-configured optimizer factories for the test runs below; juniper uses
# ipopt as its NLP sub-solver and cbc as its MIP sub-solver.
scs = JuMP.with_optimizer(SCS.Optimizer, max_iters=100000)
ipopt = JuMP.with_optimizer(Ipopt.Optimizer, tol=1e-6, print_level=0)
cplex = JuMP.with_optimizer(CPLEX.Optimizer)
cbc = JuMP.with_optimizer(Cbc.Optimizer)
gurobi = JuMP.with_optimizer(Gurobi.Optimizer)
mosek = JuMP.with_optimizer(Mosek.Optimizer)
juniper = JuMP.with_optimizer(Juniper.Optimizer, nl_solver = ipopt, mip_solver= cbc, time_limit= 7200)
function build_mn_data(base_data)
    # Parse the case, install the security-constrained loads, and replicate
    # the network once per contingency to form a multinetwork.
    mp_data = PowerModels.parse_file(base_data)
    mp_data["load"] = mp_data["sc_load"]
    return InfrastructureModels.replicate(mp_data, length(mp_data["contingencies"]))
end
function build_mn_data(base_data_1, base_data_2)
    # Merge two parsed cases into one two-network multinetwork dictionary.
    net1 = PowerModels.parse_file(base_data_1)
    net2 = PowerModels.parse_file(base_data_2)
    # Both cases must agree on units and power base.
    @assert net1["per_unit"] == net2["per_unit"]
    @assert net1["baseMVA"] == net2["baseMVA"]
    mn_data = Dict{String,Any}(
        "name" => "$(net1["name"]) + $(net2["name"])",
        "multinetwork" => true,
        "per_unit" => net1["per_unit"],
        "baseMVA" => net1["baseMVA"],
        "nw" => Dict{String,Any}()
    )
    # Strip the top-level keys that now live on the multinetwork container
    # and register each case under its network index.
    for (idx, net) in enumerate((net1, net2))
        delete!(net, "multinetwork")
        delete!(net, "per_unit")
        delete!(net, "baseMVA")
        mn_data["nw"]["$idx"] = net
    end
    return mn_data
end
# Solve the security-constrained unit-commitment transformer OPF test case
# with AC and DC formulations, each with and without relaxed continuous
# variables ("_rc").  The data is rebuilt before every run.
data = build_mn_data("./test/data/case5_scopf.m")
resultAC_rc = run_scunittfopf(data, ACPPowerModel, ipopt; multinetwork=true, setting = Dict("output" => Dict("branch_flows" => true),"relax_continuous" => true, "relax_absolute_value" => true))
data = build_mn_data("./test/data/case5_scopf.m")
resultAC = run_scunittfopf(data, ACPPowerModel, juniper; multinetwork=true, setting = Dict("output" => Dict("branch_flows" => true),"relax_continuous" => false, "relax_absolute_value" => true))
data = build_mn_data("./test/data/case5_scopf.m")
resultDC_rc = run_scunittfopf(data, DCPPowerModel, cplex; multinetwork=true, setting = Dict("output" => Dict("branch_flows" => true),"relax_continuous" => true, "relax_absolute_value" => false))
data = build_mn_data("./test/data/case5_scopf.m")
resultDC = run_scunittfopf(data, DCPPowerModel, cplex; multinetwork=true, setting = Dict("output" => Dict("branch_flows" => true),"relax_continuous" => false, "relax_absolute_value" => false))
# ,"relax_absolute_value" => true
#TODO
# Fixes to make everything compatible
# Extend data model with HVDC contingencies (Branch + Converter)
# Extend load shedding model to possible DC loads
# Start addig contingency constraints for the DC branches and converter models (first check on paper for issues such voltage etc, get inspiration from transformer model)
#
|
{"hexsha": "a6016f962d8eb6f5b2b1375e9d6802101d80cf9f", "size": 3001, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/model/test_scopf.jl", "max_stars_repo_name": "frederikgeth/PowerModelsReliability.jl", "max_stars_repo_head_hexsha": "a3b53c5f93134e159cd1e1beac39b2e5a4a0c7ba", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2017-10-06T21:43:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-19T12:35:54.000Z", "max_issues_repo_path": "test/model/test_scopf.jl", "max_issues_repo_name": "frederikgeth/PowerModelsReliability", "max_issues_repo_head_hexsha": "a3b53c5f93134e159cd1e1beac39b2e5a4a0c7ba", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2018-02-19T14:37:42.000Z", "max_issues_repo_issues_event_max_datetime": "2018-03-21T13:56:40.000Z", "max_forks_repo_path": "test/model/test_scopf.jl", "max_forks_repo_name": "frederikgeth/PowerModelsReliability", "max_forks_repo_head_hexsha": "a3b53c5f93134e159cd1e1beac39b2e5a4a0c7ba", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-10-23T13:10:01.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-04T12:11:41.000Z", "avg_line_length": 39.4868421053, "max_line_length": 194, "alphanum_fraction": 0.7330889703, "num_tokens": 892}
|
from datetime import datetime
from pdb import set_trace
from time import time
import numpy as np
import tensorflow as tf
import torch
from deep_lagrangian_networks.replay_memory import PyTorchReplayMemory
from deep_lagrangian_networks.utils import init_env, load_dataset
from DeLaN_tensorflow_ddq import DeepLagrangianNetwork
from DeLaN_utils import plot_test2
class Train:
    """Train/evaluate a Deep Lagrangian Network (DeLaN) on a 2-DoF dataset.

    Uses TensorFlow for the model/optimization and a PyTorch-based replay
    memory for minibatch sampling; samples are converted between frameworks
    per batch in `convert_to_tf`.
    """

    def __init__(self):
        """Load the dataset, build the model/optimizer, and set up
        replay memory and checkpointing."""
        # Read the dataset:
        n_dof = 2
        cuda = 0
        # train_data, test_data, self.divider = load_dataset()
        train_data, test_data, self.divider = load_dataset(
            filename="data/sine_track2.pickle"
        )
        # Training split: labels, joint positions/velocities/accelerations, torques.
        (
            self.train_labels,
            self.train_qp,
            self.train_qv,
            self.train_qa,
            self.train_tau,
        ) = train_data
        # Test split additionally carries the ground-truth inertial (m),
        # Coriolis/centrifugal (c) and gravitational (g) torque components.
        (
            self.test_labels,
            self.test_qp,
            self.test_qv,
            self.test_qa,
            self.test_tau,
            self.test_m,
            self.test_c,
            self.test_g,
        ) = test_data
        # Network architecture and optimization hyper-parameters.
        self.hyper = {
            "n_width": 64,
            "n_depth": 2,
            "diagonal_epsilon": 0.01,
            "activation": "softplus",
            "b_init": 1.0e-4,
            "b_diag_init": 0.001,
            "w_init": "xavier_normal",
            "gain_hidden": np.sqrt(2.0),
            "gain_output": 0.1,
            "n_minibatch": 512,
            "learning_rate": 5.0e-04,
            "weight_decay": 1.0e-5,
            "max_epoch": 50000,
        }
        self.model = DeepLagrangianNetwork(n_dof, **self.hyper)
        self.optimizer = tf.keras.optimizers.Adam(
            learning_rate=self.hyper["learning_rate"], amsgrad=True
        )
        # Generate Replay Memory:
        # one (n_dof,) slot each for q, qd, qdd, tau.
        mem_dim = ((n_dof,), (n_dof,), (n_dof,), (n_dof,))
        self.mem = PyTorchReplayMemory(
            self.train_qp.shape[0], self.hyper["n_minibatch"], mem_dim, cuda
        )
        self.mem.add_samples(
            [self.train_qp, self.train_qv, self.train_qa, self.train_tau]
        )
        # Information for saving model
        self.checkpoint = tf.train.Checkpoint(net=self.model)
        # Timestamp used as the checkpoint directory name.
        self.stamp = datetime.fromtimestamp(time()).strftime("%Y%m%d-%H%M%S")

    def train(self):
        """Run the training loop; checkpoints every 1000 epochs.

        Each epoch iterates all minibatches from the replay memory and reports
        the mean per-batch loss. Note: loss is printed every epoch (the
        every-50-epochs condition is commented out).
        """
        # Print training information
        print("\n\n################################################")
        print("Characters:")
        print("   Test Characters = {0}".format(self.test_labels))
        print("  Train Characters = {0}".format(self.train_labels))
        print("# Training Samples = {0:05d}".format(int(self.train_qp.shape[0])))
        print("")
        # Training Parameters:
        print("\n################################################")
        print("Training Deep Lagrangian Networks (DeLaN):")
        for epoch_i in range(self.hyper["max_epoch"]):
            l_mem, n_batches = 0.0, 0.0
            for q, qd, qdd, tau in self.mem:
                # Replay memory yields torch tensors; move them to TF.
                q_tf, qd_tf, qdd_tf, tau_tf = self.convert_to_tf(q, qd, qdd, tau)
                loss = self.opt(q_tf, qd_tf, qdd_tf, tau_tf)
                l_mem += loss
                n_batches += 1
            l_mem /= float(n_batches)
            # if epoch_i == 1 or np.mod(epoch_i, 50) == 0:
            print("Epoch {0:05d}: ".format(epoch_i), end=" ")
            print("Loss = {0:.3e}\n".format(l_mem))
            # Periodic checkpoint under trained_models/<timestamp>/.
            if np.mod(epoch_i + 1, 1000) == 0:
                filename = "trained_models/{}/tf_model_{}".format(
                    self.stamp, epoch_i + 1
                )
                self.checkpoint.save("{}".format(filename))

    def test(self, filename=None):
        """Evaluate the model on the test split and plot the results.

        Args:
            filename: optional checkpoint path; when given, weights are
                restored from it before evaluation.
        """
        if filename is not None:
            # Load pre-trained model
            self.checkpoint.restore(filename)
        # Get test data
        q = tf.cast(self.test_qp, tf.float32)
        dq = tf.cast(self.test_qv, tf.float32)
        tau = tf.cast(self.test_tau, tf.float32)
        # Calculate torque using test data
        delan_ddq, delan_M, delan_C, delan_G = self.model(q, dq, tau)
        # Inertial torque M(q) @ qdd from the predicted mass matrix.
        delan_Mddq = tf.squeeze(delan_M @ self.test_qa[:, :, tf.newaxis])
        # Get test error: mean squared error per component over the test set.
        mean_coeff = 1.0 / float(self.test_qp.shape[0])
        err_g = mean_coeff * np.sum((delan_G - self.test_g) ** 2)
        err_m = mean_coeff * np.sum((delan_Mddq - self.test_m) ** 2)
        err_c = mean_coeff * np.sum((delan_C - self.test_c) ** 2)
        err_qa = mean_coeff * np.sum((delan_ddq - self.test_qa) ** 2)
        print("\nPerformance:")
        print("                   ddq MSE = {0:.3e}".format(err_qa))
        print("              Inertial MSE = {0:.3e}".format(err_m))
        print("Coriolis & Centrifugal MSE = {0:.3e}".format(err_c))
        print("         Gravitational MSE = {0:.3e}".format(err_g))
        plot_test2(
            delan_ddq,
            delan_Mddq,
            delan_C,
            delan_G,
            self.test_qa,
            self.test_m,
            self.test_c,
            self.test_g,
            self.divider,
            self.test_labels,
        )

    @tf.function
    def opt(self, q_tf, qd_tf, qdd_tf, tau_tf):
        """One optimization step: MSE between predicted and target
        accelerations, compiled to a TF graph via @tf.function.

        Returns the scalar batch loss.
        """
        with tf.GradientTape() as tape:
            qdd_hat, _, _, _ = self.model(q_tf, qd_tf, tau_tf)
            # Sum over DoF, mean over the batch.
            err = tf.math.reduce_sum(tf.square(qdd_hat - qdd_tf), axis=1)
            loss = tf.reduce_mean(err)
        grads = tape.gradient(loss, self.model.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.model.trainable_weights))
        return loss

    def convert_to_tf(self, q, qd, qdd, tau):
        """Convert torch tensors (from the replay memory) to TF tensors
        via host-side numpy copies."""
        q_tf = tf.convert_to_tensor(q.cpu().numpy())
        qd_tf = tf.convert_to_tensor(qd.cpu().numpy())
        qdd_tf = tf.convert_to_tensor(qdd.cpu().numpy())
        tau_tf = tf.convert_to_tensor(tau.cpu().numpy())
        return q_tf, qd_tf, qdd_tf, tau_tf
def main():
    """Pin TensorFlow to the second GPU and run the DeLaN evaluation."""
    tf.keras.backend.set_floatx("float32")
    tf.debugging.set_log_device_placement(True)

    # Restrict TF to GPU 1 and enable on-demand memory growth; both calls
    # must happen before the GPU context is initialized.
    gpus = tf.config.experimental.list_physical_devices("GPU")
    tf.config.experimental.set_visible_devices(gpus[1], "GPU")
    tf.config.experimental.set_memory_growth(gpus[1], True)

    with tf.device("/device:GPU:1"):
        trainer = Train()
        # trainer.train()
        # trainer.test()
        trainer.test("trained_models/20210628-081643/tf_model_10000-10")


if __name__ == "__main__":
    main()
|
{"hexsha": "4d80374c194805c4f534405be607264b61660ba9", "size": 6314, "ext": "py", "lang": "Python", "max_stars_repo_path": "DeLaN_train_ddq.py", "max_stars_repo_name": "BolunDai0216/deep_lagrangian_networks", "max_stars_repo_head_hexsha": "18fdc324669026b8bccccde94929bce0e068919c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "DeLaN_train_ddq.py", "max_issues_repo_name": "BolunDai0216/deep_lagrangian_networks", "max_issues_repo_head_hexsha": "18fdc324669026b8bccccde94929bce0e068919c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DeLaN_train_ddq.py", "max_forks_repo_name": "BolunDai0216/deep_lagrangian_networks", "max_forks_repo_head_hexsha": "18fdc324669026b8bccccde94929bce0e068919c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.585106383, "max_line_length": 81, "alphanum_fraction": 0.5568577764, "include": true, "reason": "import numpy", "num_tokens": 1620}
|
import signal
import sys
import time
import thread
import numpy as np
from minicps.devices import PLC
class BasePLC(PLC):
    """Common behaviour for simulated PLC devices.

    Reads tag values from the local DB, optionally perturbs sensor readings
    with gaussian noise, forwards the state to the CPPPO server, and installs
    signal handlers for a clean shutdown.
    """

    # Pulls a fresh value from the local DB and updates the local CPPPO
    def send_system_state(self):
        """Read every sensor/actuator tag and transmit the values.

        Sensor readings get zero-mean gaussian noise of scale ``noise_scale``
        (skipped when the scale is 0); actuator values are sent unmodified.
        Tags that fail to read are logged and skipped.
        """
        values = []
        # Send sensor values (may have gaussian noise)
        for tag in self.sensors:
            # noinspection PyBroadException
            try:
                # Gaussian noise added with respect to noise_scale
                if self.noise_scale != 0:
                    values.append(float(self.get(tag)) + np.random.normal(0, self.noise_scale))
                else:
                    values.append(float(self.get(tag)))
            except Exception:
                self.logger.error("Exception trying to get the tag.")
                continue
        # Send actuator values (unaffected by noise)
        for tag in self.actuators:
            # noinspection PyBroadException
            try:
                values.append(self.get(tag))
            except Exception:
                self.logger.error("Exception trying to get the tag.")
                continue
        # Fixed: the attribute was consistently misspelled 'send_adddress'
        # here and in set_parameters; renamed to 'send_address' in both places.
        self.send_multiple(self.tags, values, self.send_address)

    def set_parameters(self, sensors, actuators, values, send_address, noise_scale, week_index=0):
        """Store tag lists and transmission parameters on the instance."""
        self.sensors = sensors
        self.actuators = actuators
        self.tags = self.sensors + self.actuators
        self.values = values
        self.send_address = send_address
        self.noise_scale = noise_scale
        self.week_index = week_index

    def sigint_handler(self, sig, frame):
        """Handle SIGINT/SIGTERM: flag the reader loop to stop and exit."""
        self.logger.debug('PLC shutdown commencing.')
        self.reader = False
        sys.exit(0)

    def startup(self):
        """Install the shutdown signal handlers."""
        signal.signal(signal.SIGINT, self.sigint_handler)
        signal.signal(signal.SIGTERM, self.sigint_handler)
        #thread.start_new_thread(self.send_system_state, (0, 0))
|
{"hexsha": "4f754fe705d9e269882faa188b0ac05adb7f1af9", "size": 1906, "ext": "py", "lang": "Python", "max_stars_repo_path": "dhalsim/python2/basePLC.py", "max_stars_repo_name": "afmurillo/WadiTwin", "max_stars_repo_head_hexsha": "80e2e260a99c02f93aa0a45c9037eef07a70a2f2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dhalsim/python2/basePLC.py", "max_issues_repo_name": "afmurillo/WadiTwin", "max_issues_repo_head_hexsha": "80e2e260a99c02f93aa0a45c9037eef07a70a2f2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dhalsim/python2/basePLC.py", "max_forks_repo_name": "afmurillo/WadiTwin", "max_forks_repo_head_hexsha": "80e2e260a99c02f93aa0a45c9037eef07a70a2f2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2962962963, "max_line_length": 98, "alphanum_fraction": 0.6180482686, "include": true, "reason": "import numpy", "num_tokens": 402}
|
import numpy as np
import pandas as pd
# Load the labelled email corpus; expects 'text' and 'spam' (0/1) columns.
emails = pd.read_csv('./emails.csv')
#emails[:10]
def process_email(text):
    """Lower-case *text* and return its unique whitespace-separated words."""
    return list(set(text.lower().split()))
# Tokenize every email into its set of unique lower-cased words.
emails['words'] = emails['text'].apply(process_email)
num_emails = len(emails)
num_spam = sum(emails['spam'])
print("Number of emails:", num_emails)
print("Number of spam emails:", num_spam)
print()
# Calculating the prior probability that an email is spam
print("Probability of spam:", num_spam/num_emails)
print()
# model maps word -> {'spam': count, 'ham': count}; counts start at 1
# (Laplace smoothing) so no probability is ever exactly 0.
model = {}
# Training process
for index, email in emails.iterrows():
    for word in email['words']:
        if word not in model:
            model[word] = {'spam': 1, 'ham': 1}
        # Always true after the insertion above; increment the matching class.
        if word in model:
            if email['spam']:
                model[word]['spam'] += 1
            else:
                model[word]['ham'] += 1
def predict_bayes(word):
    """P(spam | word): the spam share of the word's smoothed counts in `model`."""
    counts = model[word.lower()]
    spam_count, ham_count = counts['spam'], counts['ham']
    return 1.0 * spam_count / (spam_count + ham_count)
# Single-word spam probabilities via Bayes' rule.
print("Prediction using Bayes for word sale",predict_bayes("sale"))
print("Prediction using Bayes for word lottery",predict_bayes("lottery"))
print()
def predict_naive_bayes(email):
    """Naive-Bayes estimate of P(spam | email).

    Multiplies per-word likelihood ratios (from the Laplace-smoothed counts
    in the module-level `model`) and the class priors derived from `emails`.
    Words not seen during training are ignored.
    """
    total = len(emails)
    num_spam = sum(emails['spam'])
    num_ham = total - num_spam
    email = email.lower()
    words = set(email.split())
    spams = [1.0]
    hams = [1.0]
    for word in words:
        if word in model:
            spams.append(model[word]['spam']/num_spam*total)
            hams.append(model[word]['ham']/num_ham*total)
    # np.compat.long was removed in NumPy 2.0; on Python 3 it was simply an
    # alias for int, so int() preserves the original behavior.
    prod_spams = int(np.prod(spams)*num_spam)
    prod_hams = int(np.prod(hams)*num_ham)
    return prod_spams/(prod_spams + prod_hams)
# Multi-word predictions; the last two probe unseen words and a hammy phrase.
print("Prediction using NaiveBayes for word lottery sale",predict_naive_bayes("lottery sale"))
print("Prediction using NaiveBayes for word asdfgh",predict_naive_bayes("asdfgh"))
print("Prediction using NaiveBayes ",predict_naive_bayes('Hi mom how are you'))
|
{"hexsha": "ae89283f8f7255747f942a2bc60036846b7a9041", "size": 1992, "ext": "py", "lang": "Python", "max_stars_repo_path": "NaiveBayes.py", "max_stars_repo_name": "abhishekloni01/AIML-Lab-Termworks", "max_stars_repo_head_hexsha": "253a2ed8cb056281267acfe4c7e559b4cadd790a", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "NaiveBayes.py", "max_issues_repo_name": "abhishekloni01/AIML-Lab-Termworks", "max_issues_repo_head_hexsha": "253a2ed8cb056281267acfe4c7e559b4cadd790a", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NaiveBayes.py", "max_forks_repo_name": "abhishekloni01/AIML-Lab-Termworks", "max_forks_repo_head_hexsha": "253a2ed8cb056281267acfe4c7e559b4cadd790a", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-16T22:32:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-16T22:32:39.000Z", "avg_line_length": 31.619047619, "max_line_length": 94, "alphanum_fraction": 0.6686746988, "include": true, "reason": "import numpy", "num_tokens": 531}
|
# coding: utf-8
# In[1]:
import sklearn.mixture as mix
import scipy as sp
import numpy as np
import copy
'''
num:データの個数
dim:データの特徴量次元
state = {'FEATURE', 'LABEL', 'CLUSTER', 'SCORE', 'GM'}:ディクショナリ
feature:選択した特徴量を表すリスト
label:データをクラスタリングした際のラベル
clusters:データをいくつのクラスタに分類するか。Boumanのアルゴリズムによって求める。
score:評価値
'''
def scale(data):
    """Standardize each column of *data* to zero mean and unit variance.

    Parameters
    ----------
    data : ndarray of shape (num, dim)

    Returns
    -------
    ndarray of shape (num, dim) with column-wise z-scores.
    """
    # Per-column mean and (population) standard deviation.
    mu = np.mean(data, axis=0)
    sigma = np.std(data, axis=0)
    # Broadcasting replaces the original O(num*dim) Python-level double
    # comprehension and produces exactly the same values.
    return (data - mu) / sigma
def score_likelihood(data, GM):
    """Return a higher-is-better likelihood-based score of *GM* on *data*.

    BIC (and AIC) are lower-is-better criteria, so the BIC is negated here;
    a plain log-likelihood score would already be higher-is-better.

    Parameters
    ----------
    data : array of samples to score
    GM : fitted Gaussian mixture model exposing ``bic``

    Returns
    -------
    float : the negated BIC (maximum-likelihood criterion score).
    """
    return -GM.bic(data)
def score_ss(data, label, clusters):  # evaluation function for a clustering result
    """Scatter-separability score trace(Sw^-1 Sb) of a clustering.

    Computes the within-cluster covariance Sw and between-cluster covariance
    Sb and returns trace(Sw^-1 Sb) — larger means better-separated clusters.

    Parameters
    ----------
    data : ndarray of shape (num, dim)
    label : cluster assignment per sample (values in range(clusters))
    clusters : number of clusters

    Returns
    -------
    float : the separability score, or -inf when Sw is singular.
    """
    num = data.shape[0]
    dim = data.shape[1]
    Sw = np.zeros((dim,dim)) # within-cluster covariance matrix
    Sb = np.zeros((dim,dim)) # between-cluster covariance matrix
    # Per-cluster data subsets kept as a plain list: clusters generally have
    # different sizes, and np.array() over such a ragged collection is an
    # error in modern NumPy.
    subdata = [data[label[:]==i, :] for i in range(clusters)]
    # Fraction of samples falling in each cluster.
    pi = np.array([subdata[i].shape[0]/num for i in range(clusters)])
    for i in range(clusters):
        # A single scalar sample has no covariance; skip it.
        if subdata[i].shape[0] != 1 or subdata[i].shape[1] != 1:
            Sw += pi[i] * np.cov(subdata[i], rowvar=0)
    mean_all = np.mean(data, axis=0) # overall mean vector
    for i in range(clusters):
        mean_diff = np.matrix(np.mean(subdata[i], axis=0) - mean_all)
        Sb += pi[i] * mean_diff.T * mean_diff
    try: # compute trace(Sw^{-1}Sb)
        score = np.trace(np.linalg.solve(Sw, Sb))
    except np.linalg.LinAlgError: # Sw is singular
        # Fixed: the original only printed here and then raised
        # UnboundLocalError at `return score`; report the worst possible
        # score instead so this clustering is never selected as best.
        print("!!! LinAlgError !!!")
        score = -np.inf
    return score
def min_dist(weights, means, covs):
    '''
    Evaluate the distance function for every cluster pair and find the
    closest pair (helper for bouman_merge).

    Inputs
        weights : per-cluster probabilities (size: clusters)
        means   : per-cluster means (size: clusters, dim)
        covs    : per-cluster covariance matrices (size: clusters, dim, dim)
    Outputs
        cluster_pair : the "closest" cluster pair [i, j]
        w_best : probability of the cluster obtained by merging i and j
        m_best : mean of the merged cluster (size: dim)
        c_best : covariance matrix of the merged cluster (size: dim, dim)
    '''
    clusters = weights.shape[0]
    dim = means.shape[1]
    dist_minimum = np.inf
    cluster_pair = [-1, -1]
    w_best = 0
    m_best = np.zeros(dim)
    c_best = np.zeros((dim, dim))
    # Exhaustive scan over all unordered pairs (i < j).
    for j in range(1, clusters):
        for i in range(j):
            w_i = weights[i]
            w_j = weights[j]
            w_ij = w_i + w_j
            m_i = means[i]
            m_j = means[j]
            # Weighted mean of the merged cluster.
            m_ij = (w_i*m_i + w_j*m_j)/w_ij
            diff_m_i_ij = np.matrix(m_i - m_ij)
            diff_m_j_ij = np.matrix(m_j - m_ij)
            c_i = covs[i]
            c_j = covs[j]
            # Pooled covariance of the merged cluster.
            c_ij = (w_i*(c_i + diff_m_i_ij.T * diff_m_i_ij) + w_j*(c_j + diff_m_j_ij.T * diff_m_j_ij)) / w_ij
            det_c_i = np.linalg.det(c_i)
            det_c_j = np.linalg.det(c_j)
            det_c_ij = np.linalg.det(c_ij)
            # NOTE(review): log base 2 or e? np.log is the natural log.
            dist_temp = w_i*np.log(det_c_ij/det_c_i) + w_j*np.log(det_c_ij/det_c_j)
            if dist_temp <= dist_minimum:
                dist_minimum = dist_temp
                cluster_pair = [i, j]
                w_best = w_ij
                m_best = copy.deepcopy(m_ij)
                c_best = copy.deepcopy(c_ij)
    return cluster_pair, w_best, m_best, np.array(c_best)
def bouman_merge(weights, means, covs):
    '''
    Merge the closest cluster pair and compute the resulting cluster state
    (helper for em_clustering).

    Inputs
        weights : per-cluster probabilities (size: <clusters>)
        means   : per-cluster means (size: <clusters>, dim)
        covs    : per-cluster covariance matrices (size: <clusters>, dim, dim)
    Outputs
        w_new : cluster probabilities after merging i,j (size: <clusters>-1)
        m_new : cluster means after merging i,j (size: <clusters>-1, dim)
        c_new : cluster covariances after merging i,j (size: <clusters>-1, dim, dim)
        precisions_chol : inverse of the Cholesky factor of each covariance
                          in c_new (size: <clusters>-1, dim, dim)
    '''
    dim = means.shape[1]
    cluster_pair, w_merged, m_merged, c_merged = min_dist(weights, means, covs)
    # Drop the two merged clusters and append the merged one at the end.
    w_new = np.append(np.delete(weights, cluster_pair, axis=0), [w_merged], axis=0)
    m_new = np.append(np.delete(means, cluster_pair, axis=0), [m_merged], axis=0)
    c_new = np.append(np.delete(covs, cluster_pair, axis=0), [c_merged], axis=0)
    precisions_chol = np.empty(c_new.shape)
    for i, covariance in enumerate(c_new):
        try:
            cov_chol = np.linalg.cholesky(covariance)
        except np.linalg.LinAlgError:
            # Fixed: the original raised ValueError(estimate_precision_error_message)
            # with an undefined name, masking the real error with a NameError.
            raise ValueError(
                "Merged component covariance is not positive definite; "
                "cannot compute its Cholesky precision factor."
            )
        precisions_chol[i] = sp.linalg.solve_triangular(cov_chol, np.eye(dim), lower=True).T
    return w_new, m_new, c_new, precisions_chol
def em_clustering(data):
    '''
    EM-cluster the given data and return the resulting labels.
    The number of clusters is chosen with Bouman's algorithm: start from
    clusters_max, repeatedly merge the closest pair, and keep the best-scoring
    model.

    Inputs
        data : data (size: num, dim)
    Outputs
        label_best : best label assignment found by EM clustering (size: num)
        clusters_best : best cluster count found by Bouman's algorithm
        GM_best : the best GM model
    '''
    num = data.shape[0]
    dim = data.shape[1]
    # NOTE(review): is num**(1/4)+1 an appropriate choice for clusters_max?
    clusters_max = int(num**(1/4))+1
    GM_best = mix.GaussianMixture(n_components = clusters_max)
    GM_best.fit(data)
    label_best = GM_best.predict(data)
    clusters_best = clusters_max
    score_em_best = score_likelihood(data, GM_best)
    print('score_em:',score_em_best)
    GM_temp = copy.deepcopy(GM_best)
    for i in range(1, clusters_max):
        print('loop_Bouman:', i)
        w_temp, m_temp, c_temp, pre_chol_temp = bouman_merge(GM_temp.weights_, GM_temp.means_, GM_temp.covariances_)
        # The merged parameters are written directly onto a fresh (unfitted)
        # sklearn GaussianMixture, bypassing fit(); relies on sklearn's
        # internal attribute layout (weights_, means_, covariances_,
        # precisions_cholesky_).
        GM_temp = mix.GaussianMixture(n_components = clusters_max-i)
        GM_temp.weights_ = w_temp
        GM_temp.means_ = m_temp
        GM_temp.covariances_ = c_temp
        GM_temp.precisions_cholesky_ = pre_chol_temp
        score_em_temp = score_likelihood(data, GM_temp) # score?
        print('score_em:', score_em_temp)
        # Keep the merged model whenever it scores at least as well.
        if score_em_temp >= score_em_best:
            label_best = GM_temp.predict(data)
            clusters_best = clusters_max-i
            score_em_best = score_em_temp
            GM_best = copy.deepcopy(GM_temp)
    print('score_em_best:', score_em_best)
    return (label_best, clusters_best, GM_best)
def feature_selection(data, feature_selected, criterion_mode, *, num_loop=0):
    '''
    Select a relevant feature using sequential forward search: try adding
    each not-yet-selected feature, EM-cluster the resulting subset, and keep
    the feature with the best score.

    Inputs
        data : data (size: num, dim)
        feature_selected : features already selected in previous loops (size: dim)
        criterion_mode : 'SS' or 'ML'
        num_loop : index of this feature-selection iteration
    Outputs
        state dict with
        FEATURE : feature selection / priority list (size: dim)
        LABEL : cluster assignment for FEATURE (size: num)
        CLUSTER : cluster count for FEATURE
        SCORE : evaluation score for FEATURE
        GM : GM model for FEATURE
    '''
    num = data.shape[0]
    dim = data.shape[1]
    # feature_best_index : index of the feature newly selected at the best score
    # **_best : the best-scoring set found in this loop
    feature_best_index = -1
    feature_best = copy.deepcopy(feature_selected)
    label_best = np.empty(num)
    clusters_best = 0
    score_best = - np.inf
    GM_best = mix.GaussianMixture()
    # data_confirmed : data subset over the already-selected features
    data_confirmed = data[:, feature_selected[:] != 0]
    for i in range(dim):
        print('loop_fs:', i)
        if feature_selected[i] == 0:
            # Candidate subset: confirmed features plus feature i.
            data_temp = np.c_[data_confirmed, data[:, i]]
            label_temp, clusters_temp, GM_temp = em_clustering(data_temp)
            # NOTE(review): any criterion_mode other than 'ML'/'SS' leaves
            # score_temp unbound and raises below — confirm callers only
            # pass these two values.
            if criterion_mode == 'ML':
                score_temp = score_likelihood(data_temp, GM_temp)
                print('score_ml:', score_temp)
            elif criterion_mode == 'SS':
                score_temp = score_ss(data_temp, label_temp, clusters_temp)
                print('score_ss:', score_temp)
            if score_temp > score_best:
                feature_best_index = i
                label_best = copy.deepcopy(label_temp)
                clusters_best = clusters_temp
                score_best = score_temp
                GM_best = copy.deepcopy(GM_temp)
    # Record the newly selected feature with priority num_loop+1.
    feature_best[feature_best_index] = num_loop+1
    return {'FEATURE':feature_best, 'LABEL':label_best, 'CLUSTER':clusters_best, 'SCORE':score_best, 'GM':GM_best}
def fssem(data, *, eps=0, criterion_mode='ML'):
    '''
    Run FSSEM (feature selection wrapped around EM clustering).

    Inputs
        data : data to analyse (size: num, dim)
        eps : threshold for terminating the FSSEM loop
        criterion_mode = {'ML', 'SS'} : which criterion to evaluate with
    Outputs
        state_best dict with
        FEATURE : feature selection / priority list (size: dim)
        LABEL : cluster assignment for FEATURE (size: num)
        CLUSTER : cluster count for FEATURE
        SCORE : evaluation score for FEATURE
        GM : GM model for FEATURE
    '''
    data = scale(data)
    num = data.shape[0]
    dim = data.shape[1]
    state_best = {'FEATURE':np.zeros(dim), 'LABEL':np.empty(num), 'CLUSTER':0, 'SCORE':0.0, 'GM':mix.GaussianMixture()}
    state_temp = {'FEATURE':np.zeros(dim), 'LABEL':np.empty(num), 'CLUSTER':0, 'SCORE':0.0, 'GM':mix.GaussianMixture()}
    for loop in range(dim):
        state_temp = feature_selection(data, state_temp['FEATURE'], criterion_mode, num_loop=loop)
        print("loop, state_temp:", loop, state_temp)
        if loop == 0:
            state_best = copy.deepcopy(state_temp)
        else:
            # S1, C1: state_temp's feature subset, cluster assignment
            # S2, C2: state_best's feature subset, cluster assignment
            # Cross-scores compare the two states on each other's feature subset.
            if criterion_mode == 'ML':
                def gen_GM(data_prev, pi_prev, mu_prev, sigma_prev, data_next):
                    # Re-estimate a GM on data_next from the responsibilities
                    # that the (pi_prev, mu_prev, sigma_prev) model assigns on
                    # data_prev (one E-step + one M-step).
                    # NOTE(review): `num` is taken from the enclosing fssem scope.
                    # num = data_t.shape[0]
                    clusters = mu_prev.shape[0]
                    dim_prev = data_prev.shape[1]
                    dim_next = data_next.shape[1]
                    def pdf_Gauss(x, mu, sigma):
                        # Multivariate gaussian density at x.
                        a = ((2*np.pi)**dim_prev) * np.linalg.det(sigma)
                        diff_x = np.matrix(x-mu)
                        sigma_inv = np.matrix(sigma).I
                        b = -0.5 * diff_x * sigma_inv * diff_x.T # b is a (1,1) matrix
                        return np.exp(b[0,0]) / np.sqrt(a)
                    # E-step: responsibilities of each cluster for each sample.
                    E = np.empty((num, clusters))
                    for i in range(num):
                        temp = [pdf_Gauss(data_prev[i], mu_prev[s], sigma_prev[s]) * pi_prev[s] for s in range(clusters)]
                        sum_temp = sum(temp)
                        E[i] = np.array([temp[j] / sum_temp for j in range(clusters)])
                    # M-step: weights, means and covariances on data_next.
                    pi = sum(E) / num
                    mu = np.empty((clusters, dim_next))
                    for j in range(clusters):
                        sum_temp_mu = 0
                        for i in range(num):
                            sum_temp_mu += E[i][j] * data_next[i]
                        mu[j] = sum_temp_mu / (num * pi[j])
                    sigma = np.empty((clusters, dim_next, dim_next))
                    for j in range(clusters):
                        sum_temp_sigma = 0
                        for i in range(num):
                            diff_x = np.matrix(data_next[i]-mu[j])
                            sum_temp_sigma += np.array(E[i][j] * diff_x.T * diff_x)
                        sigma[j] = sum_temp_sigma / (num * pi[j])
                    precisions_chol = np.empty((clusters, dim_next, dim_next))
                    for i, covariance in enumerate(sigma):
                        try:
                            cov_chol = np.linalg.cholesky(covariance)
                        except np.linalg.LinAlgError:
                            # NOTE(review): estimate_precision_error_message is
                            # undefined — this raises NameError if reached.
                            raise ValueError(estimate_precision_error_message)
                        precisions_chol[i] = sp.linalg.solve_triangular(cov_chol, np.eye(dim_next), lower=True).T
                    GM = mix.GaussianMixture(n_components = clusters)
                    GM.weights_ = pi
                    GM.means_ = mu
                    GM.covariances_ = sigma
                    GM.precisions_cholesky_ = precisions_chol
                    return GM
                # S1_C1: temp, multiplied with S2_C1
                # S2_C2: best, multiplied with S1_C2
                GM_S2_C1 = gen_GM(data[:, state_temp['FEATURE'][:] != 0], state_temp['GM'].weights_, state_temp['GM'].means_, state_temp['GM'].covariances_, data[:, state_best['FEATURE'][:] != 0])
                GM_S1_C2 = gen_GM(data[:, state_best['FEATURE'][:] != 0], state_best['GM'].weights_, state_best['GM'].means_, state_best['GM'].covariances_, data[:, state_temp['FEATURE'][:] != 0])
                score_S2_C1 = score_likelihood(data[:, state_best['FEATURE'][:] != 0], GM_S2_C1)
                score_S1_C2 = score_likelihood(data[:, state_temp['FEATURE'][:] != 0], GM_S1_C2)
            elif criterion_mode == 'SS':
                score_S2_C1 = score_ss(data[:, state_best['FEATURE'][:] != 0], state_temp['LABEL'], state_temp['CLUSTER'])
                score_S1_C2 = score_ss(data[:, state_temp['FEATURE'][:] != 0], state_best['LABEL'], state_best['CLUSTER'])
            # Normalized comparison: keep the new state only when it improves.
            normal_v = state_temp['SCORE'] * score_S2_C1
            normal_v_best = state_best['SCORE'] * score_S1_C2
            diff_score = normal_v - normal_v_best
            print('difference:', diff_score)
            if diff_score > 0:
                state_best = copy.deepcopy(state_temp)
            else:
                break
    print(state_best['FEATURE'])
    return state_best
|
{"hexsha": "6b1b89aebbf93f57d181b0d23f0c8b29be11c4b5", "size": 14928, "ext": "py", "lang": "Python", "max_stars_repo_path": "feature_selection/FSSEM.py", "max_stars_repo_name": "Harmoware/Harmoware-SEC", "max_stars_repo_head_hexsha": "828e05116fee3804096ff6c89e211c03a0e98ac5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "feature_selection/FSSEM.py", "max_issues_repo_name": "Harmoware/Harmoware-SEC", "max_issues_repo_head_hexsha": "828e05116fee3804096ff6c89e211c03a0e98ac5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "feature_selection/FSSEM.py", "max_forks_repo_name": "Harmoware/Harmoware-SEC", "max_forks_repo_head_hexsha": "828e05116fee3804096ff6c89e211c03a0e98ac5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-10T01:37:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-10T01:37:09.000Z", "avg_line_length": 32.7368421053, "max_line_length": 258, "alphanum_fraction": 0.5442122186, "include": true, "reason": "import numpy,import scipy", "num_tokens": 4567}
|
import os
from data.base_dataset import BaseDataset, get_params, get_transform
from data.image_folder import make_dataset
from PIL import Image
import numpy as np
class AlignedDataset(BaseDataset):
    """A dataset class for paired image dataset.

    It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
    During test time, you need to prepare a directory '/path/to/data/test'.
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.dir_AB = os.path.join(opt.dataroot, opt.phase)  # get the image directory
        self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size))  # get image paths
        assert(self.opt.load_size >= self.opt.crop_size)   # crop_size should be smaller than the size of loaded image
        self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
        self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index - - a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor) - - an image in the input domain
            B (tensor) - - its corresponding image in the target domain
            A_paths (str) - - image paths
            B_paths (str) - - image paths (same as A_paths)

        Fixed: removed the per-item debug print statements (including a bare
        no-op `print` expression) and the dead commented-out hstack block —
        they ran on every sample and polluted stdout.
        """
        # read a image given a random integer index
        AB_path = self.AB_paths[index]
        AB = Image.open(AB_path).convert('RGBA')
        # split AB image into two 4-channel A panels (0-224, 224-448) and a B panel
        w, h = AB.size
        A0 = np.array(AB.crop((0, 0, 224, h)))
        A1 = np.array(AB.crop((224, 0, 448, h)))
        B = AB.crop((448, 0, w, h)).convert('RGB')  # just 3 channels for B
        B = Image.fromarray(np.array(B).astype('uint8'), 'RGB')

        # split A into 8 separate greyscale images (one per channel of A0/A1)
        greyscale_ims = []
        for arr in [A0, A1]:
            for i in range(arr.shape[2]):
                greyscale_ims.append(Image.fromarray(arr[:, :, i].astype('uint8')))

        # apply the same transform to both A and B
        transform_params = get_params(self.opt, greyscale_ims[0].size)
        A_transform = get_transform(self.opt, transform_params, grayscale=(True))
        B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
        for i, greyscale_im in enumerate(greyscale_ims):
            greyscale_ims[i] = A_transform(greyscale_im)
        B = B_transform(B)

        # convert the transformed A images back into a single array, stacked
        # along the channel axis
        for i, greyscale_im in enumerate(greyscale_ims):
            if i == 0:
                A = greyscale_im
            else:
                A = np.concatenate((A, greyscale_im), axis=0)
        # turn B into a numpy array too (to match A)
        B = np.array(B)
        return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}

    def __len__(self):
        """Return the total number of images in the dataset."""
        return len(self.AB_paths)
|
{"hexsha": "29779f3225a87f2baf94553233060af232203f72", "size": 3855, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/aligned_dataset.py", "max_stars_repo_name": "laurencebho/pytorch-CycleGAN-and-pix2pix", "max_stars_repo_head_hexsha": "5539ea60645ccba0c76ab543d471512c20887d5c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data/aligned_dataset.py", "max_issues_repo_name": "laurencebho/pytorch-CycleGAN-and-pix2pix", "max_issues_repo_head_hexsha": "5539ea60645ccba0c76ab543d471512c20887d5c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/aligned_dataset.py", "max_forks_repo_name": "laurencebho/pytorch-CycleGAN-and-pix2pix", "max_forks_repo_head_hexsha": "5539ea60645ccba0c76ab543d471512c20887d5c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.55, "max_line_length": 118, "alphanum_fraction": 0.6057068742, "include": true, "reason": "import numpy", "num_tokens": 961}
|
"""Import tasks for the Pan-STARRS Survey for Transients
"""
import csv
import os
from astropy.time import Time as astrotime
from astrocats.catalog.utils import make_date_string, pbar
def _read_tsv(file_path):
    """Return the rows of a tab-separated file as a list of string lists."""
    with open(file_path, 'r') as f:
        return list(csv.reader(f, delimiter='\t',
                               quotechar='"', skipinitialspace=True))


def do_psst(catalog):
    """Import events from the Pan-STARRS Survey for Transients (PSST).

    Ingests three tables into *catalog*:
      - 2016arXiv160204156S table 1: claimed types and spectroscopic redshifts,
      - 2016arXiv160204156S table 2: coordinates and discovery dates (MJD),
      - 1606.04795 (Smartt et al. 2016): coordinates, dates, types, aliases.
    Entries are journaled after each bibliographic source.
    """
    task_str = catalog.get_current_task_str()

    # 2016arXiv160204156S, table 1: claimed types and redshifts.
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2016arXiv160204156S-tab1.tsv')
    for row in pbar(_read_tsv(file_path), task_str):
        # Guard against empty rows before peeking at row[0][0].
        if not row or row[0].startswith('#'):
            continue
        (name,
         source) = catalog.new_entry(row[0],
                                     bibcode='2016arXiv160204156S')
        catalog.entries[name].add_quantity(
            'claimedtype', row[3].replace('SN', '').strip('() '), source)
        catalog.entries[name].add_quantity('redshift', row[5].strip(
            '() '), source, kind='spectroscopic')

    # 2016arXiv160204156S, table 2: positions and discovery dates.
    file_path = os.path.join(
        catalog.get_current_task_repo(), '2016arXiv160204156S-tab2.tsv')
    for row in pbar(_read_tsv(file_path), task_str):
        if not row or row[0].startswith('#'):
            continue
        (name,
         source) = catalog.new_entry(row[0],
                                     bibcode='2016arXiv160204156S')
        catalog.entries[name].add_quantity('ra', row[1], source)
        catalog.entries[name].add_quantity('dec', row[2], source)
        # Column 4 holds the discovery epoch as an MJD float.
        mldt = astrotime(float(row[4]), format='mjd').datetime
        discoverdate = make_date_string(mldt.year, mldt.month, mldt.day)
        catalog.entries[name].add_quantity('discoverdate', discoverdate,
                                           source)
    catalog.journal_entries()

    # 1606.04795 (Smartt et al. 2016): positions, dates, types and aliases.
    file_path = os.path.join(catalog.get_current_task_repo(), '1606.04795.tsv')
    for row in pbar(_read_tsv(file_path), task_str):
        if not row or row[0].startswith('#'):
            continue
        (name,
         source) = catalog.new_entry(row[0],
                                     srcname='Smartt et al. 2016',
                                     url='http://arxiv.org/abs/1606.04795')
        catalog.entries[name].add_quantity('ra', row[1], source)
        catalog.entries[name].add_quantity('dec', row[2], source)
        mldt = astrotime(float(row[3]), format='mjd').datetime
        discoverdate = make_date_string(mldt.year, mldt.month, mldt.day)
        catalog.entries[name].add_quantity('discoverdate', discoverdate,
                                           source)
        catalog.entries[name].add_quantity('claimedtype', row[6], source)
        catalog.entries[name].add_quantity(
            'redshift', row[7], source, kind='spectroscopic')
        for alias in [x.strip() for x in row[8].split(',')]:
            catalog.entries[name].add_quantity('alias', alias, source)
    catalog.journal_entries()
    return
|
{"hexsha": "9674137fa65388718cdaa8e36ae7534cecc798a8", "size": 3382, "ext": "py", "lang": "Python", "max_stars_repo_path": "tasks/psst.py", "max_stars_repo_name": "astrocatalogs/tidaldisruptions", "max_stars_repo_head_hexsha": "87558308b09c4d08a20dec141e438ccbcddb491b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-11-18T21:17:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T18:29:10.000Z", "max_issues_repo_path": "tasks/psst.py", "max_issues_repo_name": "astrocatalogs/tidaldisruptions", "max_issues_repo_head_hexsha": "87558308b09c4d08a20dec141e438ccbcddb491b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2016-11-01T13:31:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T18:35:21.000Z", "max_forks_repo_path": "tasks/psst.py", "max_forks_repo_name": "astrocatalogs/tidaldisruptions", "max_forks_repo_head_hexsha": "87558308b09c4d08a20dec141e438ccbcddb491b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-17T23:26:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T23:26:36.000Z", "avg_line_length": 43.9220779221, "max_line_length": 79, "alphanum_fraction": 0.5484920166, "include": true, "reason": "from astropy", "num_tokens": 779}
|
## Isentropic Vortex
> WORK IN PROGRESS !!!
In this example we are going to solve the Euler equations for an isentropic two-dimensional vortex in a full-periodic square domain. Since the problem is not diffusive, the expected behavior is for the vortex to be convected unchanged forever. This is a useful example for testing the diffusive properties of our methods, as well as its numerical stability.
\begin{equation}
\begin{array}{c}
\rho_t + \nabla \cdot (\rho u) = 0 \\
(\rho \mathbf{u})_t + (\mathbf{u} \cdot \nabla)(\rho \mathbf{u}) + \nabla p = 0 \\
(\rho e)_t + \nabla \cdot(\mathbf{u} ( \rho e + p )) = 0
\end{array}
\end{equation}
The inputs to the network will be the independent variables $x$, $y$ and $t$ and the outputs will be the conserved variables $\rho$, $\rho \mathbf{u}$ and $\rho e$ where $\rho$ is the density, $\mathbf{u} = (u, v)$ is the velocity and $e$ is the specific energy.
```
# autoreload nangs
%reload_ext autoreload
%autoreload 2
%matplotlib inline
```
```
#imports
import math
import numpy as np
import matplotlib.pyplot as plt
import torch
```
First we define our PDE and set the values for training.
```
from nangs.pde import PDE
from nangs.bocos import PeriodicBoco, DirichletBoco, NeumannBoco
from nangs.solutions import MLP
class MyPDE(PDE):
def __init__(self, inputs, outputs, params):
super().__init__(inputs, outputs, params)
def computePDELoss(self, grads, inputs, outputs, params):
drdt = grads['r']['t']
r, u, v, p = outputs['r'], outputs['u'], outputs['v'], outputs['p']
ru = r*u
drudx = self.computeGrad(ru, 'x')
drudt = self.computeGrad(ru, 't')
rv = r*v
drvdy = self.computeGrad(rv, 'y')
drvdt = self.computeGrad(rv, 't')
ruup = r*u*u + p
druupdx = self.computeGrad(ruup, 'x')
ruv = r*u*v
druvdy = self.computeGrad(ruv, 'y')
rvvp = r*v*v + p
drvvpdy = self.computeGrad(rvvp, 'y')
rvu = r*v*u
drvudx = self.computeGrad(rvu, 'x')
re = p/(params['g']-1) + 0.5*r*(u*u + v*v)
dredt = self.computeGrad(re, 't')
repu = (re+p)*u
drepudx = self.computeGrad(repu, 'x')
repv = (re+p)*v
drepvdy = self.computeGrad(repv, 'y')
return [
drdt + drudx + drvdy,
drudt + druupdx + druvdy,
drvdt + drvudx + drvvpdy,
dredt + drepudx + drepvdy
]
# instantiate pde
pde = MyPDE(inputs=['x', 'y', 't'], outputs=['r', 'u', 'v', 'p'], params=['g'])
# define input values
x = np.linspace(-5,5,30)
y = np.linspace(-5,5,30)
t = np.linspace(0,6,40)
pde.setValues({'x': x , 'y': y[1:-1], 't': t[:-1], 'g': np.array([1.4])})
#pde.setValues({'x': x, 'y': y, 't': t}, train=False)
```
Boundary conditions.
```
# periodic b.c for the space dimension
boco = PeriodicBoco('boco_x', {'x': x[:1], 'y': y, 't': t[:-1]}, {'x': x[-1:], 'y': y, 't': t[:-1]})
pde.addBoco(boco)
boco = PeriodicBoco('boco_y', {'x': x, 'y': y[:1], 't': t[:-1]}, {'x': x, 'y': y[-1:], 't': t[:-1]})
pde.addBoco(boco)
# initial condition (dirichlet for temporal dimension)
gamma = 1.4
r0 = np.zeros((len(y)*len(x)))
u0 = np.zeros((len(y)*len(x)))
v0 = np.zeros((len(y)*len(x)))
p0 = np.zeros((len(y)*len(x)))
b = 10
for i, _y in enumerate(y):
for j, _x in enumerate(x):
r2 = _x*_x + _y*_y
ro = (1-(b*(gamma-1))/(8*np.pi*np.pi*gamma)*math.exp(1-r2))**(1/(gamma-1))
r0[i*len(x) + j] = ro
u0[i*len(x) + j] = 1 - b/(2*np.pi)*math.exp(0.5*(1-r2))*_y
v0[i*len(x) + j] = b/(2*np.pi)*math.exp(0.5*(1-r2))*_x
p0[i*len(x) + j] = ro**gamma
boco = DirichletBoco('initial_condition', {'x': x, 'y': y, 't': t[:1]}, {'r': r0, 'u': u0, 'v': v0, 'p': p0})
pde.addBoco(boco)
```
```
# visualize initial condition
fig, ax = plt.subplots()
u, v = u0.reshape(len(y),len(x)), v0.reshape(len(y),len(x))
q = ax.quiver(x, y, u, v)
ax.quiverkey(q, X=0.3, Y=1.1, U=10, label='Quiver key, length = 10', labelpos='E')
plt.show()
```
```
plt.imshow(r0.reshape(len(y),len(x)))
plt.colorbar()
plt.show()
```
Now we define a topology for our solution and set the training parameters. Then we can find a solution for our PDE.
```
# define solution topology
mlp = MLP(pde.n_inputs, pde.n_outputs, 3, 100)
optimizer = torch.optim.Adam(mlp.parameters(), lr=3e-4)
pde.compile(mlp, optimizer)
```
```
# find the solution
hist = pde.solve()
```
```
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,5))
ax1.plot(hist['train_loss'], label="train_loss")
#ax1.plot(hist['val_loss'], label="val_loss")
ax1.grid(True)
ax1.legend()
ax1.set_yscale("log")
for boco in pde.bocos:
ax2.plot(hist['bocos'][boco.name], label=boco.name)
ax2.legend()
ax2.grid(True)
ax2.set_yscale("log")
plt.show()
```
Finally, we can evaluate our solution.
```
# evaluate the solution
x = np.linspace(-5,5,30)
y = np.linspace(-5,5,30)
t = np.array([0])
pde.evaluate({'x': x, 'y': y, 't': t})
r = pde.outputs['r']
plt.imshow(r.reshape(len(y),len(x)))
plt.colorbar()
plt.show()
```
```
x = np.linspace(-5,5,30)
y = np.linspace(-5,5,30)
t = np.linspace(0,4,30)
from matplotlib import animation, rc
rc('animation', html='html5')
def plot(t):
ax.clear()
#tit = ax.set_title(f"t = {t:.2f}, l2 = {l2:.5f}", fontsize=14)
tit = ax.set_title(f"t = {t:.2f}", fontsize=14)
pde.evaluate({'x': x, 'y': y, 't': np.array([t])})
ax.imshow(pde.outputs['r'].reshape(len(y),len(x)))
ax.set_xlabel("x", fontsize=14)
ax.set_ylabel("y", fontsize=14, rotation=np.pi/2)
#ax.colorbar()
return [tit]
def get_anim(fig, ax, t):
def anim(i):
return plot(t[i])
return anim
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(111, autoscale_on=False)
animate = get_anim(fig, ax, t)
anim = animation.FuncAnimation(fig, animate, frames=len(t), interval=100, blit=True)
```
```
anim
```
```
```
|
{"hexsha": "91b6bd0fa34d1860632f1377a4d474f0347c3312", "size": 91632, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "examples/06_vortex2d.ipynb", "max_stars_repo_name": "smatkovi/nangs", "max_stars_repo_head_hexsha": "b9ab6f32fe3632d9ee403f197742cc203670217d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-02-26T17:44:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-05T10:27:44.000Z", "max_issues_repo_path": "examples/06_vortex2d.ipynb", "max_issues_repo_name": "smatkovi/nangs", "max_issues_repo_head_hexsha": "b9ab6f32fe3632d9ee403f197742cc203670217d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/06_vortex2d.ipynb", "max_forks_repo_name": "smatkovi/nangs", "max_forks_repo_head_hexsha": "b9ab6f32fe3632d9ee403f197742cc203670217d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 217.6532066508, "max_line_length": 64756, "alphanum_fraction": 0.8885760433, "converted": true, "num_tokens": 2045}
|
from absl import app
from absl import flags
import os
import re
import numpy as np
import string
import tensorflow as tf
from tensorflow import keras
from pprint import pprint
from read_dbpedia import load_dbpedia
from read_imdb import load_imdb
from read_trec_50 import load_trec_50
from read_trec_6 import load_trec_6
import hyperparameters as hp
from defences.dp.dp_optimizer_keras import DPKerasAdamOptimizer
import defences.dp.classification.config as cfg
from defences.dp.classification import utils
# Enable on-demand GPU memory allocation instead of grabbing all VRAM upfront.
# NOTE(review): indices 0 and 1 assume at least two GPUs are present — this
# raises IndexError on single-GPU machines; confirm the target environment.
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
tf.config.experimental.set_memory_growth(physical_devices[1], True)
# Fix the TF RNG seed for reproducible runs.
tf.random.set_seed(2021)
# Vocabulary size for the TextVectorization layer and GloVe embedding width.
vocab_size = 50000
embedding_dim = 300
# Command-line flags (absl): training/eval switches, DP-SGD settings,
# dataset/poisoning configuration.
flags.DEFINE_boolean('train', True, 'train')
flags.DEFINE_boolean('test', True, 'test')
flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, train with vanilla SGD.')
flags.DEFINE_string('dataset', 'dbpedia', 'Choose a dataset.')
flags.DEFINE_string('num_poisons', '500', 'Number of poisons')
flags.DEFINE_string('trigger', 'differential privacy', 'Trigger phrase')
flags.DEFINE_list('noise_multiplier', [0], 'Noise')
flags.DEFINE_list('l2_norm_clip', [1e-6], 'Clipping norm')
flags.DEFINE_integer('epochs', 9999, 'Number of epochs')
flags.DEFINE_integer('microbatches', 1, 'Number of microbatches (must evenly divide batch_size)')
flags.DEFINE_string('model_dir', cfg.cnn_model_dir, 'Model directory')
# Candidate sweep values kept for reference:
# [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1]
# [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
FLAGS = flags.FLAGS
def custom_standardization(input_data):
    """Lowercase text, replace '<br />' tags with spaces, strip punctuation."""
    text = tf.strings.lower(input_data)
    text = tf.strings.regex_replace(text, '<br />', ' ')
    punct_pattern = '[%s]' % re.escape(string.punctuation)
    return tf.strings.regex_replace(text, punct_pattern, '')
def get_glove_embedding(vectorizer):
    """Build a GloVe embedding matrix aligned with the vectorizer vocabulary.

    Returns a (len(vocab), embedding_dim) array whose i-th row is the GloVe
    vector of vocabulary token i; tokens without a GloVe entry keep a zero row.
    """
    vocab = vectorizer.get_vocabulary()
    word_index = {token: idx for idx, token in enumerate(vocab)}

    # Parse the GloVe text file: each line is a token followed by its floats.
    embeddings_index = {}
    with open(cfg.glove_file_path) as glove_file:
        for line in glove_file:
            word, coefs = line.split(maxsplit=1)
            embeddings_index[word] = np.fromstring(coefs, "f", sep=" ")
    print("Found %s word vectors." % len(embeddings_index))

    embedding_matrix = np.zeros((len(vocab), embedding_dim))
    hits = 0
    misses = 0
    for word, idx in word_index.items():
        vector = embeddings_index.get(word)
        if vector is None:
            misses += 1
        else:
            embedding_matrix[idx] = vector
            hits += 1
    print("Converted %d words (%d misses)" % (hits, misses))
    return embedding_matrix
class CNN(keras.Model):
    """1-D convolutional text classifier over frozen GloVe embeddings.

    Raw strings are vectorized, embedded with a fixed (non-trainable) GloVe
    matrix, passed through three Conv1D/MaxPooling stages, and mapped to
    ``n_class`` logits by a dense head.
    """

    def __init__(self, vectorizer, embedding_matrix, n_class):
        """Build the sequential model.

        :param vectorizer: fitted TextVectorization layer (string -> int ids).
        :param embedding_matrix: (vocab, embedding_dim) GloVe weight array.
        :param n_class: number of output classes (logits, no softmax).
        """
        super(CNN, self).__init__()
        self.model = tf.keras.Sequential([
            vectorizer,
            tf.keras.layers.Embedding(
                input_dim=len(vectorizer.get_vocabulary()),
                output_dim=embedding_dim,
                embeddings_initializer=keras.initializers.Constant(embedding_matrix),
                mask_zero=True,
                # Keep the pretrained GloVe vectors frozen during training.
                trainable=False),
            tf.keras.layers.Conv1D(128, 7, activation='relu', padding='same'),
            tf.keras.layers.MaxPooling1D(),
            tf.keras.layers.Conv1D(256, 5, activation='relu', padding='same'),
            tf.keras.layers.MaxPooling1D(),
            tf.keras.layers.Conv1D(512, 3, activation='relu', padding='same'),
            tf.keras.layers.MaxPooling1D(),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.Dropout(0.5),
            # Raw logits; losses are built with from_logits=True.
            tf.keras.layers.Dense(n_class)
        ])

    def call(self, inputs, training=None, mask=None):
        """Forward pass; delegates to the inner Sequential model."""
        return self.model(inputs, training=training)

    def train_step(self, data):
        """Custom training step compatible with the DP Keras optimizer.

        NOTE(review): uses the private ``optimizer._compute_gradients(...,
        tape=tape)`` API, presumably so DPKerasAdamOptimizer can clip and
        noise per-microbatch gradients — confirm against the tf-privacy
        version in use before upgrading TensorFlow.
        """
        text, label = data
        with tf.GradientTape() as tape:
            y_pred = self(text, training=True)
            loss = self.compiled_loss(label, y_pred, regularization_losses=self.losses)
        grads_and_vars = self.optimizer._compute_gradients(loss, self.trainable_variables, tape=tape)
        self.optimizer.apply_gradients(grads_and_vars)
        self.compiled_metrics.update_state(label, y_pred)
        return {m.name: m.result() for m in self.metrics}
def run(noise_multiplier=None, l2_norm_clip=None):
    """Train and/or evaluate the CNN on the dataset selected by FLAGS.

    :param noise_multiplier: DP-SGD noise multiplier (used when FLAGS.dpsgd).
    :param l2_norm_clip: DP-SGD per-example clipping norm (used when
        FLAGS.dpsgd).
    :return: tuple ``(acc_g, asr_te, asr_tf)`` — clean test accuracy, attack
        success rate on trigger-embedded inputs, and on trigger-free inputs.
        All three are None when FLAGS.test is False.
    """
    if FLAGS.dataset == 'imdb':
        hyper_params = hp.HP_IMDB_CNN
    elif FLAGS.dataset == 'dbpedia':
        hyper_params = hp.HP_DBPedia_CNN
    elif FLAGS.dataset == 'trec-50':
        hyper_params = hp.HP_Trec50_CNN
    elif FLAGS.dataset == 'trec-6':
        hyper_params = hp.HP_Trec6_CNN
    else:
        # Fix: `raise NotImplemented` raised a TypeError (NotImplemented is a
        # sentinel value, not an exception class).
        raise NotImplementedError(f'unknown dataset: {FLAGS.dataset}')

    if FLAGS.dpsgd:
        learning_rate = hyper_params.learning_rate_dpsgd
    else:
        learning_rate = hyper_params.learning_rate

    # Load the (poisoned) dataset splits plus trigger-embedded/-free test sets.
    if FLAGS.dataset == 'imdb':
        train_ds, val_ds, test_ds, test_te_ds, test_tf_ds, n_class = \
            load_imdb(batch_size=hyper_params.batch_size,
                      dataset='-'.join(['aclImdb', str(FLAGS.num_poisons), FLAGS.trigger]))
    elif FLAGS.dataset == 'dbpedia':
        train_ds, val_ds, test_ds, test_te_ds, test_tf_ds, n_class = load_dbpedia(
            batch_size=hyper_params.batch_size,
            dataset='-'.join(['dbpedia', str(FLAGS.num_poisons), FLAGS.trigger.replace(' ', '-')])
        )
    elif FLAGS.dataset == 'trec-50':
        train_ds, val_ds, test_ds, test_te_ds, test_tf_ds, n_class = load_trec_50(
            batch_size=hyper_params.batch_size,
            dataset='-'.join(['trec', str(FLAGS.num_poisons), FLAGS.trigger.replace(' ', '-')])
        )
    elif FLAGS.dataset == 'trec-6':
        train_ds, val_ds, test_ds, test_te_ds, test_tf_ds, n_class = load_trec_6(
            batch_size=hyper_params.batch_size,
            dataset='-'.join(['trec', str(FLAGS.num_poisons), FLAGS.trigger.replace(' ', '-')])
        )
    else:
        raise NotImplementedError(f'unknown dataset: {FLAGS.dataset}')

    vectorizer = tf.keras.layers.experimental.preprocessing.TextVectorization(
        standardize=custom_standardization,
        max_tokens=vocab_size,
        output_mode='int',
        output_sequence_length=hyper_params.sequence_length
    )
    vectorizer.adapt(train_ds.map(lambda _text, _label: _text))
    embedding_matrix = get_glove_embedding(vectorizer)
    model = CNN(vectorizer, embedding_matrix, n_class=n_class)

    if FLAGS.dpsgd:
        optimizer = DPKerasAdamOptimizer(
            l2_norm_clip=l2_norm_clip,
            noise_multiplier=noise_multiplier,
            num_microbatches=FLAGS.microbatches,
            learning_rate=learning_rate)
        # Per-example losses (no reduction) are required by the DP optimizer.
        loss = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.losses.Reduction.NONE)
        print('noise_multiplier', noise_multiplier)
        print('l2_norm_clip', l2_norm_clip)
    else:
        optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
        loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])

    # Checkpoint directory encodes the full training configuration.
    if FLAGS.dpsgd:
        ckpt_dir = os.path.join(FLAGS.model_dir,
                                FLAGS.dataset,
                                f'dp-{FLAGS.num_poisons}-'
                                f'{FLAGS.trigger}-'
                                f'n-{noise_multiplier}-'
                                f'c-{l2_norm_clip}-'
                                f'm-{FLAGS.microbatches}')
    else:
        ckpt_dir = os.path.join(FLAGS.model_dir,
                                FLAGS.dataset,
                                f'{FLAGS.num_poisons}-'
                                f'{FLAGS.trigger}')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)

    callbacks = [
        keras.callbacks.EarlyStopping(
            monitor="val_accuracy",
            min_delta=1e-2,
            patience=hyper_params.patience,
            verbose=1,
        ),
        keras.callbacks.ModelCheckpoint(
            filepath=os.path.join(ckpt_dir, 'ckpt-epoch-{epoch}'),
            save_weights_only=True,
            save_best_only=True,
            monitor="val_accuracy",
            verbose=1,
        )
    ]

    # Fix: default the metrics so the return below cannot raise NameError
    # when FLAGS.test is False.
    acc_g = asr_te = asr_tf = None
    if FLAGS.train:
        model.fit(train_ds,
                  epochs=FLAGS.epochs,
                  validation_data=val_ds,
                  batch_size=hyper_params.batch_size,
                  callbacks=callbacks)
    if FLAGS.test:
        latest = tf.train.latest_checkpoint(ckpt_dir)
        model.load_weights(latest)
        acc_g = model.evaluate(test_ds)[1]
        print('Accuracy (general):', acc_g)
        asr_te = utils.asr(model, test_te_ds, hyper_params.tgt_class)
        print('Error & ASR (Trigger-embedded):', asr_te)
        asr_tf = utils.asr(model, test_tf_ds, hyper_params.tgt_class)
        print('Error & ASR (Trigger-free):', asr_tf)
    return acc_g, asr_te, asr_tf
def main(argv):
    """Entry point: sweep DP hyper-parameters (when enabled) and log results."""
    FLAGS.trigger = FLAGS.trigger.replace(' ', '-')

    # The log file name mirrors the run configuration.
    if FLAGS.dpsgd:
        log_name = (f'{FLAGS.dataset}-'
                    f'dp-{FLAGS.num_poisons}-'
                    f'{FLAGS.trigger}.log')
    else:
        log_name = (f'{FLAGS.dataset}-'
                    f'{FLAGS.num_poisons}-'
                    f'{FLAGS.trigger}.log')
    log_path = os.path.join(cfg.log_dir, log_name)

    with open(log_path, 'w') as log_file:
        if FLAGS.dpsgd:
            # Grid over every (noise multiplier, clipping norm) pair.
            for noise in FLAGS.noise_multiplier:
                for clip in FLAGS.l2_norm_clip:
                    acc_g, asr_te, asr_tf = run(noise, clip)
                    log_file.write(f'{noise}, {clip}, {acc_g}, {asr_te}, {asr_tf}\n')
                    log_file.flush()
        else:
            acc_g, asr_te, asr_tf = run()
            log_file.write(f'{acc_g}, {asr_te}, {asr_tf}\n')
if __name__ == '__main__':
    # absl entry point: parses command-line flags, then invokes main(argv).
    app.run(main)
|
{"hexsha": "1de9af6c10190e47035fc23608da296d49bd1e7d", "size": 9977, "ext": "py", "lang": "Python", "max_stars_repo_path": "defences/dp/classification/cnn-keras.py", "max_stars_repo_name": "JunW15/AdvMT", "max_stars_repo_head_hexsha": "4ec727199a810cd0b153c2d465b9660641e0f3f1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "defences/dp/classification/cnn-keras.py", "max_issues_repo_name": "JunW15/AdvMT", "max_issues_repo_head_hexsha": "4ec727199a810cd0b153c2d465b9660641e0f3f1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "defences/dp/classification/cnn-keras.py", "max_forks_repo_name": "JunW15/AdvMT", "max_forks_repo_head_hexsha": "4ec727199a810cd0b153c2d465b9660641e0f3f1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4124087591, "max_line_length": 101, "alphanum_fraction": 0.6191239852, "include": true, "reason": "import numpy", "num_tokens": 2339}
|
"""
Calculates daily zonal-mean yt of a given surface variable
for an aquaplanet simulation
"""
import numpy as np
import xarray as xr
from ds21grl.misc import get_dim_exp
from ds21grl.read_aqua import read_yt_zm_sfc_daily
from ds21grl.write_data import write_yt_zm_sfc_daily
from ds21grl.config import data_name,dir_raw_aqua,dir_processed
# INPUT -----------------------------------------------------------
data_name_local = data_name[1:10]
var_name = ['FLDS', 'LWCFS']
write2file = 0
# -----------------------------------------------------------------

# For every experiment/variable pair: read the daily zonal-mean surface
# field and (optionally, per write2file) write it back out.
for dataset in data_name_local:
    for variable in var_name:
        print('dataset: ' + dataset, ', variable: ' + variable)

        # grid/time dimensions for this experiment
        dim = get_dim_exp(dataset)

        # input (raw) and output (processed) directories
        dir_in = dir_raw_aqua + dataset + '/'
        dir_out = dir_processed + dataset + '/'

        data = read_yt_zm_sfc_daily(variable, dir_in, dim)
        write_yt_zm_sfc_daily(data, variable, dir_out, dim, write2file)
|
{"hexsha": "97c03a61ae27f90af7b681381916c374eaf1704d", "size": 1072, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/processing/12.0-calc-yt-zm-sfc-daily-aqua.py", "max_stars_repo_name": "edunnsigouin/ds21grl", "max_stars_repo_head_hexsha": "b6544cbc97529943da86e48a437ce68dc00e0f82", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-08T18:13:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-08T18:13:47.000Z", "max_issues_repo_path": "code/processing/12.0-calc-yt-zm-sfc-daily-aqua.py", "max_issues_repo_name": "edunnsigouin/ds21grl", "max_issues_repo_head_hexsha": "b6544cbc97529943da86e48a437ce68dc00e0f82", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/processing/12.0-calc-yt-zm-sfc-daily-aqua.py", "max_forks_repo_name": "edunnsigouin/ds21grl", "max_forks_repo_head_hexsha": "b6544cbc97529943da86e48a437ce68dc00e0f82", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-10T14:48:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-10T14:48:04.000Z", "avg_line_length": 28.2105263158, "max_line_length": 69, "alphanum_fraction": 0.5606343284, "include": true, "reason": "import numpy", "num_tokens": 267}
|
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2015-2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
from __future__ import absolute_import
__authors__ = ["D. Naudet"]
__license__ = "MIT"
__date__ = "15/09/2016"
from collections import OrderedDict
import numpy as np
from matplotlib import cm
from silx.gui import qt as Qt
from ...io.QSpaceH5 import QSpaceH5
from ...io.FitH5 import FitH5Writer, FitH5QAxis
from ..widgets.Containers import GroupBox
from ..widgets.RoiAxisWidget import RoiAxisWidget
from ..widgets.Input import StyledLineEdit
from ...process.peak_fit import PeakFitter, FitTypes
class Roi3DSelectorWidget(Qt.QWidget):
    """
    Widget displaying three RoiAxisWidgets, one for each axis.
    """
    sigRoiChanged = Qt.Signal(object)
    """ Signal emitted when one of the slider is moved. The new ranges are
    passed to the listener : a dictionary with three SliderState instances,
    one for each axis.
    """
    sigRoiToggled = Qt.Signal(bool)
    """ Signal emitted when the QGroupWidget is toggled on/off. """
    def __init__(self, *args, **kwargs):
        """Build the checkable 'Roi' group box holding the X/Y/Z axis widgets."""
        super(Roi3DSelectorWidget, self).__init__(*args, **kwargs)
        self.setContentsMargins(0, 0, 0, 0)
        layout = Qt.QVBoxLayout(self)
        # Checkable group box: unchecked (default) means the ROI is inactive.
        self.__grpBox = grpBox = GroupBox('Roi')
        grpBox.setCheckable(True)
        grpBox.setChecked(False)
        grpBox.toggled.connect(self.sigRoiToggled)
        grpLayout = Qt.QVBoxLayout(grpBox)
        xRoiWid = self.__xRoiWid = RoiAxisWidget('X')
        yRoiWid = self.__yRoiWid = RoiAxisWidget('Y')
        zRoiWid = self.__zRoiWid = RoiAxisWidget('Z')
        grpLayout.addWidget(xRoiWid)
        grpLayout.addWidget(yRoiWid)
        grpLayout.addWidget(zRoiWid)
        # Any axis slider movement is re-emitted as a single sigRoiChanged.
        xRoiWid.sigSliderMoved.connect(self.__slotSliderMoved)
        yRoiWid.sigSliderMoved.connect(self.__slotSliderMoved)
        zRoiWid.sigSliderMoved.connect(self.__slotSliderMoved)
        layout.addWidget(grpBox)
    def __slotSliderMoved(self, sliderEvt):
        """
        Slot called each time a slider moves. Emits sigRoiChanged with the
        current state of all three axes (the moved axis uses the event's
        state, the other two are queried from their sliders).
        :param sliderEvt: slider state of the axis that moved.
        :return:
        """
        sender = self.sender()
        if sender == self.__xRoiWid:
            xState = sliderEvt
            yState = self.__yRoiWid.slider().getSliderState()
            zState = self.__zRoiWid.slider().getSliderState()
        elif sender == self.__yRoiWid:
            xState = self.__xRoiWid.slider().getSliderState()
            yState = sliderEvt
            zState = self.__zRoiWid.slider().getSliderState()
        elif sender == self.__zRoiWid:
            xState = self.__xRoiWid.slider().getSliderState()
            yState = self.__yRoiWid.slider().getSliderState()
            zState = sliderEvt
        elif sender == self.__grpBox:
            # NOTE(review): grpBox.toggled is not connected to this slot, so
            # this branch looks unreachable — confirm before removing.
            return
        else:
            raise RuntimeError('Unknown sender.')
        self.sigRoiChanged.emit({'x': xState,
                                 'y': yState,
                                 'z': zState})
    def isActive(self):
        """Return True when the ROI group box is checked (ROI enabled)."""
        return self.__grpBox.isChecked()
    def xSlider(self):
        """
        Returns the RangeSlider for the X axis
        :return:
        """
        return self.__xRoiWid.slider()
    def ySlider(self):
        """
        Returns the RangeSlider for the Y axis
        :return:
        """
        return self.__yRoiWid.slider()
    def zSlider(self):
        """
        Returns the RangeSlider for the Z axis
        :return:
        """
        return self.__zRoiWid.slider()
class FitWidget(Qt.QWidget):
"""
Fit process widget.
:param qspaceFile:
:param kwargs:
"""
sigProcessDone = Qt.Signal(object)
""" Signal emitted when a fit is done. Argument is the name of the file
containing the results.
"""
sigProcessStarted = Qt.Signal()
""" Signal emitted when a fit is started. Argument is the name of the file
containing the results.
"""
FitTypes = OrderedDict([('Gaussian', FitTypes.GAUSSIAN),
('Centroid', FitTypes.CENTROID)])\
# ,
# ('Silx', FitTypes.SILX)])
__sigFitDone = Qt.Signal()
__progressDelay = 500
# TODO : pass the actual roi values
def __init__(self,
qspaceFile,
**kwargs):
super(FitWidget, self).__init__(**kwargs)
self.__qspaceH5 = qspaceH5 = QSpaceH5(qspaceFile)
self.__progTimer = None
self.__outputFile = None
self.__nPeaks = 1
layout = Qt.QGridLayout(self)
self.__roiWidget = roiWidget = Roi3DSelectorWidget()
layout.addWidget(roiWidget)
fileLayout = Qt.QHBoxLayout()
self.__fileEdit = fileEdit = StyledLineEdit(nChar=20, readOnly=True)
fileLayout.addWidget(Qt.QLabel('File :'))
fileLayout.addWidget(fileEdit)
layout.addLayout(fileLayout, 1, 0)
fitLayout = Qt.QHBoxLayout()
self.__fitTypeCb = fitTypeCb = Qt.QComboBox()
fitTypeCb.addItems(list(FitWidget.FitTypes.keys()))
fitTypeCb.setCurrentIndex(0)
fitLayout.addWidget(Qt.QLabel('Fit :'))
fitLayout.addWidget(fitTypeCb)
fitTypeCb.currentIndexChanged[str].connect(
self.__slotCurrentTextChanged)
layout.addLayout(fitLayout, 2, 0, alignment=Qt.Qt.AlignLeft)
self.__nPeaksSpinBox = spinbox = Qt.QSpinBox()
# spinbox.setMinimum(1)
# spinbox.setMaximum(20)
# spinbox.setValue(self.__nPeaks)
# spinbox.setToolTip('Max. number of expected peaks.')
# spinbox.valueChanged.connect(self.__slotValueChanged)
# fitLayout.addWidget(spinbox)
# fitLayout.addWidget(Qt.QLabel('peak(s)'))
runLayout = Qt.QHBoxLayout()
self.__runButton = runButton = Qt.QPushButton('Run')
runButton.setEnabled(False)
runButton.clicked.connect(self.__slotRunClicked)
runLayout.addWidget(runButton)
self.__progBar = progBar = Qt.QProgressBar()
runLayout.addWidget(progBar)
layout.addLayout(runLayout, 3, 0, alignment=Qt.Qt.AlignCenter)
self.__statusLabel = statusLabel = Qt.QLabel('Ready')
statusLabel.setFrameStyle(Qt.QFrame.Panel | Qt.QFrame.Sunken)
layout.addWidget(statusLabel, 4, 0)
with qspaceH5:
qx = qspaceH5.qx
qy = qspaceH5.qy
qz = qspaceH5.qz
roiWidget.xSlider().setRange([qx[0], qx[-1]])
roiWidget.ySlider().setRange([qy[0], qy[-1]])
roiWidget.zSlider().setRange([qz[0], qz[-1]])
self.__sigFitDone.connect(self.__slotFitDone)
layout.setRowStretch(layout.rowCount(), 1)
layout.setColumnStretch(layout.columnCount(), 1)
def roiWidget(self):
"""
Returns the Roi3DSelectorWidget instance.
:return:
"""
return self.__roiWidget
def setQSpaceIndex(self, index):
"""
Selects the qspace cube at *index* in the qspace H5 file, and
displays the corresponding profiles in the sliders.
(profile = cube summed along the corresponding axis)
:param index:
:return:
"""
qspace = self.__qspaceH5.qspace_slice(index)
z_sum = qspace.sum(axis=0).sum(axis=0)
cube_sum_z = qspace.sum(axis=2)
y_sum = cube_sum_z.sum(axis=0)
x_sum = cube_sum_z.sum(axis=1)
colors = cm.jet(np.arange(255))
cmap = [Qt.QColor.fromRgbF(*c).rgba() for c in colors]
roiWidget = self.__roiWidget
roiWidget.xSlider().setSliderProfile(x_sum, colormap=cmap)
roiWidget.ySlider().setSliderProfile(y_sum, colormap=cmap)
roiWidget.zSlider().setSliderProfile(z_sum, colormap=cmap)
def setOutputFile(self, outputFile):
self.__outputFile = outputFile
if outputFile is not None:
self.__fileEdit.setText(outputFile)
else:
self.__fileEdit.clear()
self.__runButton.setEnabled(outputFile is not None)
def __slotValueChanged(self, value):
self.__nPeaks = value
def __slotCurrentTextChanged(self, text):
blocked = self.__nPeaksSpinBox.blockSignals(True)
if text in ('Gaussian', 'Silx'):
self.__nPeaksSpinBox.setEnabled(True)
self.__nPeaksSpinBox.setValue(self.__nPeaks)
else:
self.__nPeaksSpinBox.setEnabled(False)
self.__nPeaksSpinBox.setValue(1)
self.__nPeaksSpinBox.blockSignals(blocked)
def __slotRunClicked(self):
# TODO : put some safeguards
self.__lock(True)
self.__progBar.setValue(0)
fitType = FitWidget.FitTypes[self.__fitTypeCb.currentText()]
if self.__roiWidget.isActive():
x0, x1 = self.__roiWidget.xSlider().getSliderIndices()
y0, y1 = self.__roiWidget.ySlider().getSliderIndices()
z0, z1 = self.__roiWidget.zSlider().getSliderIndices()
roiIndices = [[x0, x1 + 1], [y0, y1 + 1], [z0, z1 + 1]]
else:
roiIndices = None
self.__fitter = fitter = PeakFitter(self.__qspaceH5.filename,
fit_type=fitType,
roi_indices=roiIndices,
n_peaks=self.__nPeaks)
self.__statusLabel.setText('Running...')
self.__progTimer = timer = Qt.QTimer()
timer.setSingleShot(True)
timer.timeout.connect(self.__slotProgTimer)
try:
self.sigProcessStarted.emit()
fitter.peak_fit(blocking=False, callback=self.__sigFitDone.emit)
timer.start(self.__progressDelay)
except Exception as ex:
# TODO : popup
self.__statusLabel.setText('ERROR')
print('ERROR : {0}.'.format(ex))
self.__lock(False)
self.sigProcessDone.emit(None)
def __slotProgTimer(self):
if self.__fitter:
self.__progBar.setValue(self.__fitter.progress())
self.__progTimer.start(self.__progressDelay)
def __lock(self, lock):
enable = not lock
self.__roiWidget.setEnabled(enable)
self.__fitTypeCb.setEnabled(enable)
self.__runButton.setEnabled(enable)
def __slotFitDone(self):
self.__progTimer.stop()
self.__progTimer = None
self.__lock(False)
statusLabel = self.__statusLabel
fitter = self.__fitter
status = fitter.status
try:
self.__writeResults(fitter.results)
except Exception as ex:
# TODO : popup
print(ex)
status = PeakFitter.ERROR
if status == PeakFitter.DONE:
statusLabel.setText('Succes')
self.__progBar.setValue(100)
elif status == PeakFitter.ERROR:
# TODO : popup
statusLabel.setText('ERROR')
elif status == PeakFitter.CANCELED:
# TODO : popup
statusLabel.setText('Canceled')
else:
# TODO : popup
statusLabel.setText('?')
self.__fitter = None
self.sigProcessDone.emit(self.__outputFile)
def __writeResults(self, results):
    """Dump a fit-results object to ``self.__outputFile`` (overwriting it).

    Writes, in order: the entry, the scan positions, the three q axes,
    then for every process and parameter the per-axis fit results, and
    finally the per-axis status arrays.
    """
    with FitH5Writer(self.__outputFile, mode='w') as fitH5:
        entry = results.entry
        fitH5.create_entry(entry)
        # Scan positions and q-space axes first.
        fitH5.set_scan_x(entry, results.sample_x)
        fitH5.set_scan_y(entry, results.sample_y)
        fitH5.set_qx(entry, results.q_x)
        fitH5.set_qy(entry, results.q_y)
        fitH5.set_qz(entry, results.q_z)
        # One (results-axis, writer) pair per q axis keeps the
        # per-parameter loop below free of copy/paste.
        axis_writers = ((results.QX_AXIS, fitH5.set_qx_result),
                        (results.QY_AXIS, fitH5.set_qy_result),
                        (results.QZ_AXIS, fitH5.set_qz_result))
        for process in results.processes():
            fitH5.create_process(entry, process)
            for param in results.params(process):
                # Fetch all three axis results first, then write them,
                # preserving the original read-then-write ordering.
                values = [results.results(process, param, axis)
                          for axis, _ in axis_writers]
                for (_, writer), value in zip(axis_writers, values):
                    writer(entry, process, param, value)
        # Per-axis fit status arrays.
        fitH5.set_status(entry, FitH5QAxis.qx_axis, results.qx_status())
        fitH5.set_status(entry, FitH5QAxis.qy_axis, results.qy_status())
        fitH5.set_status(entry, FitH5QAxis.qz_axis, results.qz_status())
# No standalone behavior: this module is only meant to be imported.
if __name__ == '__main__':
    pass
|
{"hexsha": "9259d88114ce626cee9ab9e489eaf741cdc609c5", "size": 14468, "ext": "py", "lang": "Python", "max_stars_repo_path": "xsocs/gui/process/FitWidget.py", "max_stars_repo_name": "omserta/xsocs", "max_stars_repo_head_hexsha": "5e1cf1352233498c48f0566e0b819e18373e95e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "xsocs/gui/process/FitWidget.py", "max_issues_repo_name": "omserta/xsocs", "max_issues_repo_head_hexsha": "5e1cf1352233498c48f0566e0b819e18373e95e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "xsocs/gui/process/FitWidget.py", "max_forks_repo_name": "omserta/xsocs", "max_forks_repo_head_hexsha": "5e1cf1352233498c48f0566e0b819e18373e95e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5684454756, "max_line_length": 79, "alphanum_fraction": 0.5877108101, "include": true, "reason": "import numpy", "num_tokens": 3268}
|
"""
Models for causal set graphs.
Available methods:
minkowski_interval(N, D)
de_sitter_interval(N, D, eta_0, eta_1)
causal_set_graph(R, p)
"""
# Copyright (C) 2016 by
# James Clough <james.clough91@gmail.com>
# All rights reserved.
# BSD license.
__author__ = "\n".join(["James Clough (james.clough91@gmail.com)"])
import networkx as nx
import numpy as np
import dagology as dag
__all__ = ['causal_set_graph',
'minkowski_interval',
'de_sitter_interval']
def causal_set_graph(R, p=1.0, periodic=None):
    """
    Create a Causal Set DAG from a set of coordinates, an NxD numpy array

    Parameters
    ----------
    R - coordinates of points (N x D array; column 0 is the time coordinate)
    p - probability with which allowed edges appear
    periodic - list - the periodic size of each dimension

    Notes
    -----
    We are assuming a conformal spacetime - ie. lightcones are straight lines
    and therefore can calculate whether two points should be connected using
    the Minkowski metric.
    """
    G = nx.DiGraph()
    N, D = R.shape
    edgelist = []
    # O(N^2) pairwise check of all ordered point pairs.
    for i in range(N):
        # Node attribute keeps the original spacetime position.
        G.add_node(i, position=tuple(R[i]))
        for j in range(N):
            # Only consider pairs where i precedes j in time.
            if R[i, 0] < R[j, 0]:
                # Edge appears with probability p (p == 1. short-circuits
                # the RNG draw entirely).
                if p == 1. or p > np.random.random():
                    if periodic:
                        # Negative interval => timelike separation => causal edge.
                        if dag.minkowski_periodic(R[i], R[j], periodic) < 0:
                            edgelist.append([i,j])
                    else:
                        if dag.minkowski(R[i], R[j]) < 0:
                            edgelist.append([i,j])
    G.add_edges_from(edgelist)
    return G
def minkowski_interval_scatter(N, D, fix_ends=True):
    """ Scatter N points in a D dimensional interval in Minkowski space

    Parameters
    ----------
    N - number of points
    D - dimension of spacetime
    fix_ends - if True, have points at start and end of interval

    Notes
    -----
    Throw points into a unit box rejecting those outside the interval
    Repeat until N points have been reached
    Note that this is inefficient for large D"""
    R = np.random.random((N, D))
    # a and b are the two tips of the causal interval: both sit at the
    # spatial centre (0.5 in every space dimension); a is at time 0, b at
    # time 1.
    a = np.zeros(D)
    a[1:] = 0.5
    b = np.zeros(D)
    b[0] = 1.
    b[1:] = 0.5
    if fix_ends:
        # Points 0 and 1 are pinned to the interval endpoints; only the
        # remaining N-2 points are resampled below.
        R[0] = a
        R[1] = b
        i_start = 2
    else:
        i_start = 0
    for i in range(i_start, N):
        # Rejection sampling: keep redrawing until the point is inside
        # both lightcones, i.e. spacelike-separated from neither tip
        # (dag.minkowski > 0 means outside the causal interval).
        while (dag.minkowski(a, R[i, :]) > 0) or ((dag.minkowski(R[i, :], b) > 0)):
            R[i, :] = np.random.random(D)
    return R
def minkowski_interval_map(N, D, fix_ends=True):
    """ Scatter N points in a D dimensional interval in Minkowski space

    Build Minkowski interval in `clever' way by mapping [0,1]^D to
    the correct spacetime coords.

    Not implemented yet -- always raises NotImplementedError.

    Parameters
    ----------
    N - number of points
    D - dimension of spacetime
    fix_ends - if True, have points at start and end of interval
    """
    # `assert False` would be silently stripped under `python -O`, making
    # this function return None; raising keeps the contract unconditional.
    raise NotImplementedError('ERROR - minkowski_interval_map not implemented yet')
def minkowski_interval(N, D, fix_ends=True, method='scatter'):
    """ Scatter N points in a D dimensional interval in Minkowski space

    Available methods are:
    scatter -- place points in unit cube and check they lie within
               the appropriate interval, keeping those that do.
               This is slow for large D - something like 2^D slowdown
    map -- map D unit cube to the relevant interval respecting volume elements
           not yet implemented

    Parameters
    ----------
    N - number of points
    D - dimension of spacetime
    fix_ends - if True, have points at start and end of interval
    method - 'scatter' or 'map'

    Raises
    ------
    ValueError - if `method` is not one of the available methods
    """
    if method == 'scatter':
        return minkowski_interval_scatter(N, D, fix_ends)
    elif method == 'map':
        return minkowski_interval_map(N, D, fix_ends)
    # Raise instead of `assert False` so the check survives `python -O`.
    raise ValueError('Invalid method %s given to minkowski_interval' % method)
def sphere_surface_cartesian(N, D):
    """ Generate N points uniformly sampled from the surface of a D-sphere.

    Returns an (N, D+1) array of Cartesian coordinates. Each point is
    drawn as a vector of i.i.d. standard normals (a spherically symmetric
    distribution) and projected onto the unit sphere by dividing by its
    Euclidean norm.
    """
    samples = np.random.randn(N, D + 1)
    row_norms = np.linalg.norm(samples, axis=1, keepdims=True)
    return samples / row_norms
def sphere_surface_angular(N, D):
    """ Generate N points uniformly sampled from the surface of a D-sphere,
    returned as an (N, D) array of angular coordinates."""
    cartesian = sphere_surface_cartesian(N, D)
    angular = np.zeros((N, D))
    for row_idx, row in enumerate(cartesian):
        angular[row_idx, :] = dag.cartesian_to_angular(row)
    return angular
def hyperbolic_disk(N, R, a=1.):
    """ Scatter N points uniformly inside a hyperbolic disk of radius R.

    The 2D hyperbolic manifold has curvature `a`. Coordinates use the
    native representation: column 0 is the polar radius r, measured as the
    hyperbolic distance to the origin, and column 1 is the polar angle.
    Returns an (N, 2) array.
    """
    points = np.random.rand(N, 2)
    # Invert the cumulative radial measure A(r) = cosh(a*r) - 1 so that a
    # uniform draw on [0, 1) becomes a radius uniform w.r.t. hyperbolic area.
    disk_measure = np.cosh(R * a) - 1.
    points[:, 0] = np.arccosh((points[:, 0] * disk_measure) + 1.) / a
    # Angles are uniform on [0, 2*pi).
    points[:, 1] *= (2. * np.pi)
    return points
def de_sitter_interval(N, D, KT2, fix_ends=False, method='scatter'):
    """ Scatter N points in a D dimensional interval in de Sitter spacetime

    Parameters
    ----------
    N - number of points
    D - dimension of spacetime
    KT2 - curvature parameter; must lie in (0, 4) for the 'scatter'
          method (see de_sitter_interval_scatter)
    fix_ends - if True, have points at start and end of interval
    method - 'scatter' or 'map' ('map' is not implemented yet)

    Raises
    ------
    ValueError - if `method` is not one of the available methods
    """
    if method == 'scatter':
        return de_sitter_interval_scatter(N, D, KT2, fix_ends)
    elif method == 'map':
        return de_sitter_interval_map(N, D, KT2, fix_ends)
    # Raise instead of `assert False` so the check survives `python -O`
    # (consistent with minkowski_interval).
    raise ValueError('Invalid method %s given to de_sitter_interval' % method)
def de_sitter_interval_scatter(N, D, KT2, fix_ends=False):
    """ Scatter N points in a D dimensional interval in de Sitter spacetime
    This function uses the method described in Meyer1988 - a rejection method
    We scatter using a conformal factor of sigma=1+K/4(ds^2)
    """
    # The conformal weight (1 - KT2/4)^(-D) used as the rejection envelope
    # diverges as KT2 -> 4 and is invalid outside this range.
    assert 0. < (KT2) < 4., 'KT^2 must be between 0 and 4 for this method'
    Z = np.empty(shape=(0, D))
    # rejection method
    # go in batches of size N
    while Z.shape[0] < N:
        # Candidate points are drawn from a flat Minkowski interval and
        # thinned according to the de Sitter conformal factor.
        R = minkowski_interval(N, D, fix_ends=fix_ends)
        R[:, 1:] -= 0.5 # fix back to 0 centre spatially
        M = (1. - (KT2 * 0.25))**(-D) # maximum value of the weight (loop-invariant)
        m = np.random.rand(N) * M # random assignments in that range
        S = (-1. * R[:,0]**2) + np.sum(R[:,1:]**2, axis=1) # Minkowski interval s^2 of each point from the origin
        sigma = (1. + (0.25 * KT2 * S))**(-D)
        # Keep candidates whose uniform draw falls under the weight curve.
        Z = np.concatenate([Z, R[m < sigma]], axis=0)
    if fix_ends:
        # NOTE(review): this pins the first two *accepted* rows to the
        # interval tips (0,...,0) and (1,0,...,0); those rows are not
        # necessarily the fixed endpoints produced inside
        # minkowski_interval (rejection may have dropped them) -- confirm
        # this is the intended behavior.
        Z[0,:] = 0.
        Z[1,:] = 0.
        Z[1,0] = 1.
    # Batches may overshoot N; truncate to exactly N points.
    return Z[:N]
def de_sitter_interval_map(N, D, KT2, fix_ends=False):
    """ Map-based de Sitter interval sampling -- not implemented yet.

    Always raises NotImplementedError.
    """
    # `assert False` would be stripped under `python -O`; raise instead.
    raise NotImplementedError('Not implemented yet')
|
{"hexsha": "7d40a25450e27be1232e321dc8440f942735e73b", "size": 6024, "ext": "py", "lang": "Python", "max_stars_repo_path": "dagology/generators/causal_set.py", "max_stars_repo_name": "JamesClough/dagology", "max_stars_repo_head_hexsha": "5421fd0ad439e70a61d0408eb1cacebaa403f671", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2017-02-16T21:35:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-09T07:33:30.000Z", "max_issues_repo_path": "dagology/generators/causal_set.py", "max_issues_repo_name": "JamesClough/dagology", "max_issues_repo_head_hexsha": "5421fd0ad439e70a61d0408eb1cacebaa403f671", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dagology/generators/causal_set.py", "max_forks_repo_name": "JamesClough/dagology", "max_forks_repo_head_hexsha": "5421fd0ad439e70a61d0408eb1cacebaa403f671", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-04-20T08:58:24.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-11T02:25:56.000Z", "avg_line_length": 30.4242424242, "max_line_length": 87, "alphanum_fraction": 0.609561753, "include": true, "reason": "import numpy,import networkx", "num_tokens": 1676}
|
'''
This is a sample class for a model. You may choose to use it as-is or make any changes to it.
This has been provided just to give you an idea of how to structure your model class.
'''
import cv2
import numpy as np
import os
from openvino.inference_engine import IECore,IENetwork,IEPlugin
class FaceDetectionModel:
    '''
    Class for the Face Detection Model.

    Wraps an OpenVINO face-detection network: loads the IR model, runs
    inference on a frame, and returns the cropped face plus its pixel
    coordinates.
    '''
    def __init__(self, model_name, device='CPU', extensions=None):
        """Read the IR network and record its input/output tensor layout.

        model_name -- path to the model; presumably the OpenVINO IR .xml
                      file, since the .bin weights path is derived from it
                      (TODO confirm with callers).
        device -- inference device name (e.g. 'CPU').
        extensions -- optional device extensions path (stored, not used here).
        """
        self.model_name = model_name
        self.device = device
        self.extensions = extensions
        self.model_structure = self.model_name
        # NOTE(review): split('.')[0] truncates at the FIRST dot, so a
        # path containing a dotted directory (or a version like
        # 'model.v2.xml') yields a wrong .bin path -- consider
        # os.path.splitext instead.
        self.model_weights = self.model_name.split('.')[0]+'.bin'
        self.plugin = None
        self.network = None
        self.exec_net = None
        self.input_name = None
        self.input_shape = None
        # NOTE(review): output_names/output_shape are never written again
        # (output_name/output_shape are set below); likely dead attributes.
        self.output_names = None
        self.output_shape = None
        try:
            self.model=IENetwork(self.model_structure, self.model_weights)
        except Exception as e:
            # NOTE(review): consider `raise ValueError(...) from e` so the
            # original cause is not lost.
            raise ValueError("Could not Initialise the network. Have you enterred the correct model path?")
        # First (and assumed only) input/output blob of the network.
        self.input_name=next(iter(self.model.inputs))
        self.input_shape=self.model.inputs[self.input_name].shape
        self.output_name=next(iter(self.model.outputs))
        self.output_shape=self.model.outputs[self.output_name].shape
    def load_model(self):
        """Check layer support on the target device and load the network.

        Exits the process (exit(1)) when unsupported layers are found --
        NOTE(review): raising an exception would be friendlier to callers.
        """
        self.plugin=IECore()
        supported_layers = self.plugin.query_network(network=self.model, device_name=self.device)
        unsupported_layers = [l for l in self.model.layers.keys() if l not in supported_layers]
        if len(unsupported_layers)!=0:
            print("unsupported layers found")
            exit(1)
        self.exec_net=self.plugin.load_network(network=self.model,device_name=self.device,num_requests=1)
    def predict(self, image,prob_threshold):
        """Detect the first face above `prob_threshold` in `image`.

        Returns (cropped_face, coords) where coords are [x_min, y_min,
        x_max, y_max] in pixels, or (0, 0) when no detection passes the
        threshold.
        """
        processed_image=self.preprocess_input(image.copy())
        outputs = self.exec_net.infer({self.input_name:processed_image})
        coords = self.preprocess_output(outputs, prob_threshold)
        if (len(coords)==0):
            # No face found.
            return 0, 0
        # Keep only the first detection.
        coords = coords[0]
        h=image.shape[0]
        w=image.shape[1]
        # Scale normalized [0, 1] box coordinates to pixel coordinates.
        coords = coords* np.array([w, h, w, h])
        coords = coords.astype(np.int32)
        cropped_face = image[coords[1]:coords[3], coords[0]:coords[2]]
        return cropped_face, coords
    def check_model(self):
        # Placeholder required by the provided class skeleton.
        raise NotImplementedError
    def preprocess_input(self, image):
        """Resize/transpose a BGR frame to the network's NCHW input shape."""
        self.image=cv2.resize(image,(self.input_shape[3],self.input_shape[2])) ## cv2.resize(frame, (w, h))
        # HWC -> CHW, then prepend the batch dimension.
        self.image=self.image.transpose((2, 0, 1))
        self.image=self.image.reshape(1, *self.image.shape)
        return self.image
    def preprocess_output(self, outputs,prob_threshold):
        """Extract normalized boxes of detections above `prob_threshold`.

        Assumes the SSD-style output layout [image_id, label, conf,
        x_min, y_min, x_max, y_max] per detection -- TODO confirm for the
        specific model used.
        """
        coords =[]
        outs = outputs[self.output_name][0][0]
        for out in outs:
            conf = out[2]
            if conf>prob_threshold:
                x_min=out[3]
                y_min=out[4]
                x_max=out[5]
                y_max=out[6]
                coords.append([x_min,y_min,x_max,y_max])
        return coords
|
{"hexsha": "fbb6d2622f351da3ee625bbc1a24da833688b544", "size": 3274, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/face_detection.py", "max_stars_repo_name": "DhruvKinger/Pointer-Controller", "max_stars_repo_head_hexsha": "ba82623987cdc6f4748e761743f207b154db95dd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-08T07:03:23.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-08T07:03:23.000Z", "max_issues_repo_path": "src/face_detection.py", "max_issues_repo_name": "DhruvKinger/Pointer-Controller", "max_issues_repo_head_hexsha": "ba82623987cdc6f4748e761743f207b154db95dd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-06-08T21:28:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:27:49.000Z", "max_forks_repo_path": "src/face_detection.py", "max_forks_repo_name": "DhruvKinger/Pointer-Controller", "max_forks_repo_head_hexsha": "ba82623987cdc6f4748e761743f207b154db95dd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-10T11:33:25.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-10T11:33:25.000Z", "avg_line_length": 32.4158415842, "max_line_length": 109, "alphanum_fraction": 0.6136224801, "include": true, "reason": "import numpy", "num_tokens": 715}
|
"""
Copyright 2021 Max-Planck-Gesellschaft
Code author: Jan Achterhold, jan.achterhold@tuebingen.mpg.de
Embodied Vision Group, Max Planck Institute for Intelligent Systems, Tübingen
This source code is licensed under the MIT license found in the
LICENSE.md file in the root directory of this source tree or at
https://opensource.org/licenses/MIT.
"""
import matplotlib.pyplot as plt
import numpy as np
from context_exploration.data.envs import make_env
from context_exploration.model.context_encoder import ContextSet
from context_exploration.model.loader import load_model
def angular_plot(angle, angular_velocity, ax):
    """
    Scatter pendulum states on an annulus: the polar direction encodes the
    pendulum angle (shifted by +pi/2 so angle 0 points up) and the radius
    encodes the angular velocity (radius 1 + 0.2 * velocity). Points are
    colored by their context index via the RdYlBu colormap.

    Parameters
    ----------
    angle : np.ndarray, [n_contexts x <bs>]
    angular_velocity : np.ndarray, [n_contexts x <bs>]
    ax : matplotlib axes to draw on
    """
    n_contexts = angle.shape[0]
    # Context index of every flattened sample (first axis broadcast over
    # the remaining batch dims); used only for coloring.
    context_idx = np.broadcast_to(
        np.arange(n_contexts)[(slice(None),) + (None,) * (angle.ndim - 1)], angle.shape
    ).flatten()
    x = np.cos(angle + np.pi / 2).flatten()
    y = np.sin(angle + np.pi / 2).flatten()
    r = 1 + 0.2 * angular_velocity.flatten()
    c = plt.cm.RdYlBu(context_idx / n_contexts)
    ax.scatter(x * r, y * r, color=c)
    ax.set_xlim([-2, 2])
    ax.set_ylim([-2, 2])
def quadrant_pendulum_entropy_plot(
    env,
    context_encoder,
    ax_0=None,
    ax_1=None,
    ylim=None,
    scatter_rad=10,
    scatter_alpha=0.5,
    scatter_marker="o",
    text_y=-6.5,
    fontsize=8,
):
    """Plot context-encoding entropy vs. number of observed transitions.

    Samples pendulum states quadrant by quadrant (Q1 first, then Q2, Q3,
    Q4), generates one-step transitions for several contexts, and records
    the context encoder's entropy as the context set grows one transition
    at a time. ax_0 receives the entropy-vs-set-size scatter, ax_1 (if
    given) an angular scatter of the sampled states.

    Parameters
    ----------
    env : environment created by make_env, supporting context seeding
    context_encoder : model with forward_set() returning a distribution
    ax_0, ax_1 : matplotlib axes (created internally when ax_0 is None)
    ylim, scatter_*, text_y, fontsize : plot styling knobs
    """
    n_contexts = 5
    per_quadrant_samples = 15
    if ax_0 is None:
        fig, ax_arr = plt.subplots(nrows=1, ncols=2)
        ax_0 = ax_arr[0]
        ax_1 = ax_arr[1]
    # Fixed seeds make the figure reproducible.
    sample_gen = np.random.RandomState(1)
    angle_samples = []
    for quadrant in [0, 1, 2, 3]:
        # First quadrant is "1"
        quadrant = (quadrant + 1) % 4
        # Angles drawn strictly inside the quadrant (pi/16 margin from
        # each boundary).
        samples = sample_gen.uniform(
            -np.pi + quadrant * np.pi / 2 + np.pi / 16,
            -np.pi + (quadrant + 1) * np.pi / 2 - np.pi / 16,
            (n_contexts, per_quadrant_samples),
        )
        angle_samples.append(samples)
    angle_samples = np.stack(angle_samples, axis=1)
    # angle_samples: n_contexts x quadrant x per_quadrant_samples
    # sample uniformly from -4..-2, 2..4
    velocity_samples = sample_gen.uniform(2, 4, angle_samples.shape)
    velocity_samples *= sample_gen.choice(np.array([-1, 1]), angle_samples.shape)
    actions = sample_gen.uniform(-1, 1, angle_samples.shape)[..., None]
    if ax_1:
        angular_plot(angle_samples, velocity_samples, ax_1)
    initial_states = np.stack((angle_samples, velocity_samples), axis=-1)
    context_seed_gen = np.random.RandomState(543)
    # entropies[c, k]: encoder entropy for context c after k transitions.
    entropies = np.zeros((n_contexts, 4 * per_quadrant_samples + 1))
    for context_idx in range(n_contexts):
        context_seed = context_seed_gen.randint(0, int(1e8))
        transitions = generate_transitions(env, context_seed, initial_states, actions)
        # transitions: (x: quadrant x per_quadrant_samples x state_dim, u, x_next)
        transitions = [t.reshape(-1, t.shape[-1]) for t in transitions]
        # transitions: (x: quadrant * per_quadrant_samples x state_dim, u, x_next)
        for set_size in range(4 * per_quadrant_samples + 1):
            # Grow the context set one transition at a time (set_size == 0
            # gives the prior entropy).
            limited_transitions = [t[:set_size] for t in transitions]
            context_set = ContextSet.from_array(*limited_transitions)
            encoding = context_encoder.forward_set(context_set)
            entropies[context_idx, set_size] = encoding.entropy().sum(dim=-1)
    for context_idx in range(n_contexts):
        # Prior entropy (empty context set) highlighted in red.
        ax_0.scatter(
            np.arange(1),
            entropies[context_idx][:1],
            s=20 * scatter_rad,
            color="r",
            marker=scatter_marker,
        )
        ax_0.scatter(
            np.arange(1, 4 * per_quadrant_samples + 1),
            entropies[context_idx][1:],
            s=scatter_rad,
            color="b",
            alpha=scatter_alpha,
            marker=scatter_marker,
        )
    if ylim:
        ax_0.set_ylim(*ylim)
    else:
        ax_0.set_ylim(-7, 12)
    ax_0.set_xlim(-2, per_quadrant_samples * 4)
    # Label and delimit the quadrant boundaries along the x axis.
    texts = ["Q1", "Q1+Q2", "Q1+Q2\n+Q3", "Q1+Q2\n+Q3+Q4"]
    for quadrant in range(4):
        text = texts[quadrant]
        ax_0.text(
            x=quadrant * per_quadrant_samples + 2, y=text_y, s=text, fontsize=fontsize
        )
        ax_0.axvline(
            x=0.5 + quadrant * per_quadrant_samples,
            ymin=0,
            ymax=1,
            color="k",
            alpha=0.5,
        )
def generate_transitions(env, context_seed, initial_states, actions):
    """Roll out one-step transitions for every (state, action) pair.

    Initializes the environment context from `context_seed`, forcibly sets
    the base environment state to each initial state, applies the paired
    action, and collects (obs, actions, obs_next). Leading batch dims of
    `initial_states` / `actions` are preserved in the outputs.

    Returns
    -------
    (obs, actions, obs_next) with obs/obs_next shaped
    initial_states.shape[:-1] + (obs_dim,).
    """
    assert initial_states.shape[:-1] == actions.shape[:-1]
    env.initialize_context(context_seed)
    # Flatten batch dims so we can iterate pair by pair.
    states_flat = initial_states.reshape(-1, initial_states.shape[-1])
    actions_flat = actions.reshape(-1, actions.shape[-1])
    obs = []
    obs_next = []
    # Unwrap nested env wrappers to reach the base env whose `.state`
    # can be written directly.
    base_env = env
    while not (base_env.unwrapped == base_env):
        base_env = base_env.unwrapped
    for state, action in zip(states_flat, actions_flat):
        env.reset()
        base_env.state = state
        obs.append(env.unwrapped.get_obs())
        # Sanity check: observation is (cos(theta), sin(theta),
        # angular_velocity) and must round-trip the injected state.
        assert np.allclose(np.arctan2(obs[-1][1], obs[-1][0]), state[0])
        assert np.allclose(obs[-1][2], state[1])
        obs_next.append(env.step(action)[0])
    # Restore the leading batch dims.
    obs = np.stack(obs).reshape(*initial_states.shape[:-1], obs[0].shape[-1])
    obs_next = np.stack(obs_next).reshape(
        *initial_states.shape[:-1], obs_next[0].shape[-1]
    )
    env.release_context()
    return obs, actions, obs_next
if __name__ == "__main__":
    # Evaluate a specific pretrained checkpoint (requires CUDA).
    run_id = "cr_s1_pendulum_bd_posweights_relu_npklw5"
    checkpoint_step = "100000_best"
    device = "cuda"
    env_name, transition_model, context_encoder, log_likelihood_model = load_model(
        run_id, checkpoint_step, device
    )
    env = make_env(env_name)
    # Entropy plot with axis limits tuned for this run.
    quadrant_pendulum_entropy_plot(env, context_encoder, ylim=(5, 25), text_y=5.8)
    plt.show()
|
{"hexsha": "38c8ef6c63406a773f2e81190dbbb116bb7ce916", "size": 5862, "ext": "py", "lang": "Python", "max_stars_repo_path": "context_exploration/evaluation/quadrant_pendulum_evaluation.py", "max_stars_repo_name": "EmbodiedVision/explorethecontext", "max_stars_repo_head_hexsha": "d4bd0d4af980de16ede642c987878a55e089f736", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "context_exploration/evaluation/quadrant_pendulum_evaluation.py", "max_issues_repo_name": "EmbodiedVision/explorethecontext", "max_issues_repo_head_hexsha": "d4bd0d4af980de16ede642c987878a55e089f736", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "context_exploration/evaluation/quadrant_pendulum_evaluation.py", "max_forks_repo_name": "EmbodiedVision/explorethecontext", "max_forks_repo_head_hexsha": "d4bd0d4af980de16ede642c987878a55e089f736", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2807017544, "max_line_length": 87, "alphanum_fraction": 0.636642784, "include": true, "reason": "import numpy", "num_tokens": 1601}
|
(* This Isabelle theory is produced using the TIP tool offered at the following website:
https://github.com/tip-org/tools
This file was originally provided as part of TIP benchmark at the following website:
https://github.com/tip-org/benchmarks
Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
to make it compatible with Isabelle2017.*)
theory TIP_sort_nat_ISortPermutes
imports "../../Test_Base"
begin

(* Cons-lists and Peano naturals, as generated by the TIP tool. *)
datatype 'a list = nil2 | cons2 "'a" "'a list"

datatype Nat = Z | S "Nat"

(* Less-or-equal on Peano naturals. *)
fun le :: "Nat => Nat => bool" where
"le (Z) y = True"
| "le (S z) (Z) = False"
| "le (S z) (S x2) = le z x2"

(* Insert an element into a (sorted) list, keeping order w.r.t. le. *)
fun insert :: "Nat => Nat list => Nat list" where
"insert x (nil2) = cons2 x (nil2)"
| "insert x (cons2 z xs) =
(if le x z then cons2 x (cons2 z xs) else cons2 z (insert x xs))"

(* Insertion sort. *)
fun isort :: "Nat list => Nat list" where
"isort (nil2) = nil2"
| "isort (cons2 y xs) = insert y (isort xs)"

(* List membership. *)
fun elem :: "'a => 'a list => bool" where
"elem x (nil2) = False"
| "elem x (cons2 z xs) = ((z = x) | (elem x xs))"

(* Delete the first element matching y under the given equality test. *)
fun deleteBy :: "('a => ('a => bool)) => 'a => 'a list =>
'a list" where
"deleteBy x y (nil2) = nil2"
| "deleteBy x y (cons2 y2 ys) =
(if (x y) y2 then ys else cons2 y2 (deleteBy x y ys))"

(* Two lists are permutations iff they contain the same multiset of
   elements: recursively match each head against the other list and
   delete the matched occurrence. *)
fun isPermutation :: "'a list => 'a list => bool" where
"isPermutation (nil2) (nil2) = True"
| "isPermutation (nil2) (cons2 z x2) = False"
| "isPermutation (cons2 x3 xs) y =
((elem x3 y) &
(isPermutation
xs (deleteBy (% (x4 :: 'a) => % (x5 :: 'a) => (x4 = x5)) x3 y)))"

(* Goal: insertion sort permutes its input (proof left open by TIP). *)
theorem property0 :
"isPermutation (isort xs) xs"
oops

end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/UR/TIP/TIP15/TIP15/TIP_sort_nat_ISortPermutes.thy"}
|
!==========================================================================
elemental subroutine gsw_specvol_second_derivatives_wrt_enthalpy (sa, ct, &
                                          p, v_sa_sa, v_sa_h, v_h_h, iflag)
! =========================================================================
!
!  Calculates three second-order derivatives of specific volume (v) with
!  respect to enthalpy.  Note that this function uses the
!  computationally-efficient expression for specific volume
!  (Roquet et al., 2014).
!
!  SA  =  Absolute Salinity                                        [ g/kg ]
!  CT  =  Conservative Temperature (ITS-90)                       [ deg C ]
!  p   =  sea pressure                                             [ dbar ]
!         ( i.e. absolute pressure - 10.1325 dbar )
!
!  v_SA_SA = The second-order derivative of specific volume with respect to
!            Absolute Salinity at constant h & p.       [ J/(kg (g/kg)^2) ]
!  v_SA_h  = The second-order derivative of specific volume with respect to
!            SA and h at constant p.                     [ J/(kg K(g/kg)) ]
!  v_h_h   = The second-order derivative with respect to h at
!            constant SA & p.
!  (NOTE(review): the unit annotations above look inherited from the
!  enthalpy-derivative routines -- confirm against the GSW reference.)
!
!  iflag   = optional bitmask selecting which of the three optional
!            outputs are actually computed (bit 1 -> v_SA_SA,
!            bit 2 -> v_SA_h, bit 3 -> v_h_h).
!--------------------------------------------------------------------------
use gsw_mod_toolbox, only : gsw_enthalpy_first_derivatives
use gsw_mod_toolbox, only : gsw_enthalpy_second_derivatives
use gsw_mod_toolbox, only : gsw_specvol_first_derivatives
use gsw_mod_toolbox, only : gsw_specvol_second_derivatives
use gsw_mod_kinds
implicit none
real (r8), intent(in) :: sa, ct, p
integer, intent(in), optional :: iflag
real (r8), intent(out), optional :: v_sa_sa, v_sa_h, v_h_h
logical :: flags(3)
real (r8) :: h_ct, h_ct_ct, h_sa, h_sa_ct, h_sa_sa, rec_h_ct, v_h_h_part
real (r8) :: rec_h_ct2, v_ct, vct_ct_ct, vct_sa_ct, vct_sa_sa, v_sa_h_part
! Decide which outputs to compute: an output is evaluated only when the
! corresponding argument is present and (if iflag is given) its bit is set.
if (present(iflag)) then
flags(1) = present(v_sa_sa) .and. btest(iflag,1)
flags(2) = present(v_sa_h) .and. btest(iflag,2)
flags(3) = present(v_h_h) .and. btest(iflag,3)
else
flags(1) = present(v_sa_sa)
flags(2) = present(v_sa_h)
flags(3) = present(v_h_h)
end if
call gsw_specvol_first_derivatives(sa,ct,p,v_ct=v_ct)
! Request only the enthalpy/specvol derivatives that the selected
! outputs actually need (v_SA_SA needs the most, v_h_h the fewest).
if (flags(1) .or. flags(2)) then
call gsw_enthalpy_first_derivatives(sa,ct,p,h_sa,h_ct)
else
call gsw_enthalpy_first_derivatives(sa,ct,p,h_ct=h_ct)
end if
if (flags(1)) then
call gsw_specvol_second_derivatives(sa,ct,p,vct_sa_sa,vct_sa_ct,vct_ct_ct)
else if (flags(2)) then
call gsw_specvol_second_derivatives(sa,ct,p,v_sa_ct=vct_sa_ct, &
                                    v_ct_ct=vct_ct_ct)
else
call gsw_specvol_second_derivatives(sa,ct,p,v_ct_ct=vct_ct_ct)
end if
if (flags(1)) then
call gsw_enthalpy_second_derivatives(sa,ct,p,h_sa_sa,h_sa_ct,h_ct_ct)
else if (flags(2)) then
call gsw_enthalpy_second_derivatives(sa,ct,p,h_sa_ct=h_sa_ct,h_ct_ct=h_ct_ct)
else
call gsw_enthalpy_second_derivatives(sa,ct,p,h_ct_ct=h_ct_ct)
end if
! Chain rule: derivatives w.r.t. h are obtained from CT-derivatives
! divided by h_CT (rec_h_ct = 1/h_CT).
rec_h_ct = 1.0_r8/h_ct
rec_h_ct2 = rec_h_ct**2.0_r8
v_h_h_part = (vct_ct_ct*h_ct - h_ct_ct*v_ct)*(rec_h_ct2*rec_h_ct)
if (flags(3)) v_h_h = v_h_h_part
! v_SA_h and v_SA_SA both reuse v_h_h_part, so compute it first.
if (flags(1) .or. flags(2)) then
v_sa_h_part = (vct_sa_ct*h_ct - v_ct*h_sa_ct)*rec_h_ct2 - h_sa*v_h_h_part
if (flags(2)) v_sa_h = v_sa_h_part
if (flags(1)) v_sa_sa = vct_sa_sa - (h_ct*(vct_sa_ct*h_sa &
    - v_ct*h_sa_sa) + v_ct*h_sa*h_sa_ct)*rec_h_ct2 - h_sa*v_sa_h_part
end if
return
end subroutine
!--------------------------------------------------------------------------
|
{"hexsha": "1a63b500144698ceaa192280556ba30d7af31244", "size": 3535, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "MOM6-interface/MOM6/pkg/GSW-Fortran/toolbox/gsw_specvol_second_derivatives_wrt_enthalpy.f90", "max_stars_repo_name": "minsukji/ci-debug", "max_stars_repo_head_hexsha": "3e8bbbe6652b702b61d2896612f6aa8e4aa6c803", "max_stars_repo_licenses": ["Apache-2.0", "CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MOM6-interface/MOM6/pkg/GSW-Fortran/toolbox/gsw_specvol_second_derivatives_wrt_enthalpy.f90", "max_issues_repo_name": "minsukji/ci-debug", "max_issues_repo_head_hexsha": "3e8bbbe6652b702b61d2896612f6aa8e4aa6c803", "max_issues_repo_licenses": ["Apache-2.0", "CC0-1.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-05-21T20:21:16.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-06T17:52:34.000Z", "max_forks_repo_path": "MOM6-interface/MOM6/pkg/GSW-Fortran/toolbox/gsw_specvol_second_derivatives_wrt_enthalpy.f90", "max_forks_repo_name": "minsukji/ci-debug", "max_forks_repo_head_hexsha": "3e8bbbe6652b702b61d2896612f6aa8e4aa6c803", "max_forks_repo_licenses": ["Apache-2.0", "CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4432989691, "max_line_length": 80, "alphanum_fraction": 0.6155586987, "num_tokens": 1046}
|
function p = hexagon_shape_2d ( angle )

%*****************************************************************************80
%
%% HEXAGON_SHAPE_2D returns the boundary point of the unit regular hexagon at a given angle.
%
%  Discussion:
%
%    The unit regular hexagon is centered at the origin with radius 1
%    (the radius is both the center-to-vertex distance and the side
%    length).  Its vertices sit at polar angles 0, 60, 120, 180, 240
%    and 300 degrees:
%
%      (  1,    0           ),
%      (  0.5,  sqrt (3)/2  ),
%      ( -0.5,  sqrt (3)/2  ),
%      ( -1,    0           ),
%      ( -0.5, -sqrt (3)/2  ),
%      (  0.5, -sqrt (3)/2  ).
%
%    Each 60 degree sector of the boundary is a straight edge; the
%    returned point is the intersection of the ray at the requested
%    angle with the line carrying that edge.
%
%  Licensing:
%
%    This code is distributed under the GNU LGPL license.
%
%  Modified:
%
%    12 May 2005
%
%  Author:
%
%    John Burkardt
%
%  Parameters:
%
%    Input, real ANGLE, the angle, in degrees, of the point.
%
%    Output, real P(2,1), the coordinates of the point.
%
  s3 = sqrt ( 3.0 );
%
%  Reduce the angle to the range 0 <= A < 360.
%
  a = r8_modp ( angle, 360.0 );
%
%  Sector 0-60: edge y = - sqrt(3) * x + sqrt(3).
%
  if ( 0.0 <= a & a <= 60.0 )
    p(1,1) = s3 / ( tan_deg ( a ) + s3 );
    p(2,1) = tan_deg ( a ) * p(1,1);
%
%  Sector 60-120: edge y = sqrt(3) / 2.
%
  elseif ( a <= 120.0 )
    p(2,1) = s3 / 2.0;
    p(1,1) = cot_deg ( a ) * p(2,1);
%
%  Sector 120-180: edge y = sqrt(3) * x + sqrt(3).
%
  elseif ( a <= 180.0 )
    p(1,1) = s3 / ( tan_deg ( a ) - s3 );
    p(2,1) = tan_deg ( a ) * p(1,1);
%
%  Sector 180-240: edge y = - sqrt(3) * x - sqrt(3).
%
  elseif ( a <= 240.0 )
    p(1,1) = - s3 / ( tan_deg ( a ) + s3 );
    p(2,1) = tan_deg ( a ) * p(1,1);
%
%  Sector 240-300: edge y = - sqrt(3) / 2.
%
  elseif ( a <= 300.0 )
    p(2,1) = - s3 / 2.0;
    p(1,1) = cot_deg ( a ) * p(2,1);
%
%  Sector 300-360: edge y = sqrt(3) * x - sqrt(3).
%
  elseif ( a <= 360.0 )
    p(1,1) = - s3 / ( tan_deg ( a ) - s3 );
    p(2,1) = tan_deg ( a ) * p(1,1);
  end

  return
end
|
{"author": "johannesgerer", "repo": "jburkardt-m", "sha": "1726deb4a34dd08a49c26359d44ef47253f006c1", "save_path": "github-repos/MATLAB/johannesgerer-jburkardt-m", "path": "github-repos/MATLAB/johannesgerer-jburkardt-m/jburkardt-m-1726deb4a34dd08a49c26359d44ef47253f006c1/geometry/hexagon_shape_2d.m"}
|
import warnings, logging, sys
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import np_utils
from keras.models import model_from_json
import pickle
import numpy as np
import matplotlib.pyplot as plt
import cv2
import pandas as pd
import os
import shutil
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from edward.models import Bernoulli, Normal, Categorical, Empirical
from edward.util import Progbar
from keras.layers import Dense
from scipy.misc import imsave
import matplotlib.pyplot as plt
from edward.util import Progbar
import edward as ed
import gc
import time
from Properties import *
warnings.filterwarnings('ignore')
logging.disable(sys.maxsize)
import argparse
# Command-line interface:
#   --imnum : index of the test image to verify
#   --sev   : severity level selector (mapped to a perturbation size below)
#   --mode  : attack/verification mode (0=FGSM, 1=PGD, 2=translation,
#             3=rotation, 4=CW-L2 -- see the dispatch further down)
#   --path  : directory holding the trained model / sampled weights
parser = argparse.ArgumentParser()
parser.add_argument("--imnum")
parser.add_argument("--sev")
parser.add_argument("--mode")
parser.add_argument("--path")
args = parser.parse_args()
image_num = int(args.imnum)
max_eps = float(args.sev)
mode = int(args.mode)
model_path = str(args.path)
# Directory with the posterior weight samples drawn during training.
weights_path = model_path + "SampledModels"
channels = 1
# Use the TensorFlow method to download and/or load the data.
mnist = input_data.read_data_sets("./MNIST_data/", one_hot=True)
x_test = mnist.test.images
y_test = mnist.test.labels
X_test = x_test
N = 256   # number of images in a minibatch.
D = 784   # number of features.
K = 10    # number of classes.
# Hidden-layer width of the fully-connected models built below.
width = 512
import math
from statsmodels.stats.proportion import proportion_confint
#from IPython.display import clear_output, display
# also known as the chernoff bound
def okamoto_bound(epsilon, delta):
    """Okamoto (Chernoff) sample bound: number of i.i.d. samples sufficient
    for the empirical mean to be within `epsilon` of the true mean with
    probability at least 1 - `delta`."""
    return -0.5 * math.log(float(delta) / 2) / epsilon ** 2
# This is h_a in the paper
def absolute_massart_halting(succ, trials, I, epsilon):
    """Massart-style adaptive halting bound h_a.

    Parameters
    ----------
    succ, trials: success count and trial count so far (kept for interface
        compatibility; the bound depends only on the interval I).
    I: (lower, upper) confidence interval for the success probability.
    epsilon: permitted estimation error.

    Returns -1 when I still straddles 0.5 (caller falls back to the
    worst-case Okamoto/Chernoff bound), otherwise the tightened number of
    samples needed.

    NOTE: relies on module-level `delta` and `alpha` (presumably brought in
    by `from Properties import *`) -- confirm they are defined at runtime.
    """
    # Removed the unused `gamma = succ/trials` computation; it was dead
    # code and raised ZeroDivisionError when called with trials == 0.
    if I[0] < 0.5 and I[1] > 0.5:
        # Interval straddles 1/2: no tightened bound available yet.
        return -1
    elif I[1] < 0.5:
        val = I[1]
        h = (9 / 2.0) * (((3 * val + epsilon) * (3 * (1 - val) - epsilon)) ** (-1))
        return math.ceil((h * (epsilon ** 2)) ** (-1) * math.log((delta - alpha) ** (-1)))
    elif I[0] >= 0.5:
        val = I[0]
        h = (9 / 2.0) * (((3 * (1 - val) + epsilon) * ((3 * val) + epsilon)) ** (-1))
        return math.ceil((h * (epsilon ** 2)) ** (-1) * math.log((delta - alpha) ** (-1)))
    # NOTE(review): preserved original fall-through: I[0] < 0.5 == I[1]
    # returns None -- confirm this boundary case cannot occur in practice.
"""
For now this algorithm is set up to solve problem formulation one
from our ICML2019 paper.
With m_delta = <r> we check the <r>-robustness of the model with respect
to the property specified (example properties above);
with m_delta = -1 we check the probabilistic safety of the model.
@param epsilon: the permitted error in our estimated 'safety' variable.
@param delta: the permitted probability that our estimated 'safety' variable is incorrect
@param alpha: significance value used in exact clopper-pearson interval estimate
@param testproperty: method to verify property of interest (see documentation for format)
@param inp: the test input to check
@param out: the ground truth (optional)
@param m_delta: safety/robustness radius to check (see above).
"""
def sequential_massart(epsilon, delta, alpha, networks, testproperty, inp,
                       m_delta, out=None, max_k=1, attacking=False):
    """Sequential massart-style estimation of P(property holds).

    Iterates over posterior weight samples (`networks`), loads each into
    the global `model`, evaluates `testproperty` on it, and maintains an
    exact Clopper-Pearson interval on the success probability. The number
    of iterations adapts: it is the minimum of the worst-case Chernoff
    bound and the Massart bound derived from the current interval.

    Returns (estimated success probability, miss rate).

    NOTE: Python 2 code (uses a `print` statement); mutates the global
    `model` object. `atk_locs` and the `attacking`/`max_k` extras are
    unused here.
    """
    atk_locs = []
    # Worst-case (Okamoto/Chernoff) number of samples.
    chernoff_bound = math.ceil( (1/(2*epsilon**2)) * math.log(2/delta) )
    #print("Maximum bound = %s"%(chernoff_bound))
    successes, iterations, misses = 0.0, 0.0, 0.0
    halting_bound = chernoff_bound
    I = [0,1]
    print "Maximum halting bound: ", halting_bound
    while(iterations <= halting_bound):
        #clear_output(wait=True)
        if(iterations > 0):
            print("Working on iteration: %s \t Bound: %s \t Param: %s"%(iterations, halting_bound, successes/iterations))
        # Load the next sampled network: try Keras weight files first,
        # fall back to raw .npz weight arrays for the two dense layers.
        # NOTE(review): bare except hides unrelated load errors.
        try:
            model.load_weights(networks[int(iterations)])
        except:
            m = np.load(networks[int(iterations)])
            start = 2
            model.layers[start].set_weights([m['arr_0'][0],m['arr_0'][1]])
            model.layers[start+1].set_weights([m['arr_0'][2], m['arr_0'][3]])
        result = testproperty(inp, out, model, m_delta, max_k=max_k)
        # result == -1 flags an inconclusive check ("miss"); count it
        # separately and treat it as a failure for the estimate.
        if(result == -1):
            misses += 1
            result = 0
        successes += result
        iterations += 1
        # Setting the method equal to 'beta' here gives us clopper-pearson
        # and ensures that these bounds are exact.
        lb, ub = proportion_confint(successes, iterations, method='beta')
        if(math.isnan(lb)):
            lb = 0.0 # Setting lb to zero if it is Nans
        if(math.isnan(ub)):
            ub = 1.0 # Setting ub to one if it is Nans
        I = [lb, ub]
        # Tighten the halting bound using the current interval; -1 means
        # the interval still straddles 0.5, so keep the worst case.
        hb = absolute_massart_halting(successes, iterations, I, epsilon)
        if(hb == -1):
            halting_bound = chernoff_bound
        else:
            halting_bound = min(hb, chernoff_bound)
    print("Exited becuase %s >= %s"%(iterations, halting_bound))
    return successes/iterations, misses/iterations
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import InputLayer, Flatten
from keras.layers import Reshape
import time
# Property-checking network: flat 784-vector input -> 512 relu -> 10 softmax.
propertymodel = Sequential()
propertymodel.add(Dense(512, activation='relu', input_shape=(784,)))
propertymodel.add(Dense(10, activation='softmax'))
propertymodel.summary()
propertymodel.compile(loss='categorical_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
# Attack-facing network: same weights layout, but takes a (1,28,28) image
# and reshapes it to (1,784) so image-space attacks (FGSM/PGD/...) can run
# directly on the 2-D input.
attackingmodel = Sequential()
attackingmodel.add(InputLayer(input_shape=(1,28,28)))
attackingmodel.add(Reshape((1,784)))
attackingmodel.add(Dense(512, activation='relu'))
attackingmodel.add(Dense(10, activation='softmax'))
attackingmodel.summary()
attackingmodel.compile(loss='categorical_crossentropy',
                 optimizer='adam',
                 metrics=['accuracy'])
# Select the verification routine and translate the attack-strength index
# `max_eps` (0..4) into a concrete perturbation budget, based on the
# script-level `mode` flag:
#   0 -> FGSM, 1 -> PGD, 2 -> translation, 3 -> rotation, 4 -> CW-L2.
if(mode==0):
    verifier = FGSM_verifier
    if(max_eps == 0):
        max_eps = 0.05
    elif(max_eps == 1):
        max_eps = 0.10
    elif(max_eps == 2):
        max_eps = 0.25
    elif(max_eps == 3):
        max_eps = 0.50
    else:
        max_eps = 0.75
    veri_directory = "FGSM"
    model = attackingmodel
elif(mode==1):
    verifier = PGD_verifier
    if(max_eps == 0):
        max_eps = 0.05
    elif(max_eps == 1):
        max_eps = 0.10
    elif(max_eps == 2):
        max_eps = 0.25
    elif(max_eps == 3):
        max_eps = 0.50
    else:
        max_eps = 0.75
    veri_directory = "PGD"
    model = attackingmodel
elif(mode==2):
    # Translation budget is measured in pixels rather than L-inf epsilon.
    verifier = translational_verifier
    if(max_eps == 0):
        max_eps = 1
    elif(max_eps == 1):
        max_eps = 2
    elif(max_eps == 2):
        max_eps = 3
    elif(max_eps == 3):
        max_eps = 4
    else:
        max_eps = 5
    veri_directory = "TRANS"
    model = attackingmodel
# NOTE(review): unlike every other mode, this branch tests max_eps==1..4
# (identity mapping), so max_eps==0 falls through to the `else` and becomes
# 5 — confirm whether the indices were meant to be 0-based as above.
elif(mode==3):
    verifier = rotational_verifier
    if(max_eps == 1):
        max_eps = 1
    elif(max_eps == 2):
        max_eps = 2
    elif(max_eps == 3):
        max_eps = 3
    elif(max_eps == 4):
        max_eps = 4
    else:
        max_eps = 5
    veri_directory = "ROTA"
    model = attackingmodel
elif(mode==4):
    # Carlini-Wagner L2: only three budgets are distinguished here.
    verifier = CWL2_verifier
    if(max_eps == 1):
        max_eps = 0.15
    elif(max_eps == 2):
        max_eps = 0.30
    else:
        max_eps = 0.45
    veri_directory = 'CW'
    model = attackingmodel
from os import listdir
from os.path import isfile, join
# Load in the model weights from this training
weights_path = weights_path + '/'
model_weights = [weights_path + f for f in listdir(weights_path) if isfile(join(weights_path, f))]
print len(model_weights)
# Statistical-test parameters consumed by sequential_massart.
epsilon = 0.075
delta = 0.075
alpha = 0.05
x_test = X_test
m_delta = 0.25
P_NORM = 2
sess = backend.get_session()
# Make sure the per-attack output directories exist before writing results.
if not os.path.exists(model_path + "Results/"):
    os.mkdir(model_path + "Results/")
if not os.path.exists(model_path + "Results/%s_Attacks/"%(veri_directory)):
    os.mkdir(model_path + "Results/%s_Attacks/"%(veri_directory))
# NOTE(review): attack_locals is (re)set to empty here and never appended to
# in this file — presumably the verifiers fill it; confirm, otherwise the
# average/variance images below are always zeros.
attack_locals = []
start = time.time()
val, misses = sequential_massart(epsilon, delta, alpha, model_weights,
                verifier, x_test[image_num], m_delta, max_k=max_eps, attacking=True)
end = time.time()
# Need to save these
try:
    avg = sum(attack_locals)/len(attack_locals)
except:
    # Empty attack_locals -> ZeroDivisionError; fall back to a blank image.
    avg = np.zeros((28,28))
avg = np.reshape(avg, (28,28))
variation = np.zeros((28,28))
for i in attack_locals:
    variation += (avg-i)**2
try:
    # NOTE(review): N is expected to be defined by the surrounding script
    # (sample count for the variance) — verify; a NameError is swallowed below.
    variation/=N
except:
    print("Divide by zero... but continuing")
var = np.reshape(variation, (28,28))
# Append the summary line and dump the per-image artifacts.
f=open(model_path + "Results/Stats-%s.txt"%(veri_directory), "a+")
f.write("| %s - %s - %s - (%s) |"%(image_num,val,end-start, max_eps))
if not os.path.exists(model_path + "Results/%s_Attacks/image_%s_eps_%s"%(veri_directory, image_num,max_eps)):
    os.mkdir(model_path +"Results/%s_Attacks/image_%s_eps_%s"%(veri_directory, image_num,max_eps))
#np.savetxt(model_path + "FGSM_Attacks/image_%s_eps_%s/original_image_val=%s.txt"%(image_num, max_eps, val), x_test[image_num])
cv2.imwrite(model_path + "Results/%s_Attacks/image_%s_eps_%s/original_image_val=%s.png"%(veri_directory, image_num, max_eps, val), x_test[image_num]+0.5)
np.savetxt(model_path + "Results/%s_Attacks/image_%s_eps_%s/average_manip_b.txt"%(veri_directory, image_num,max_eps), avg)
np.savetxt(model_path + "Results/%s_Attacks/image_%s_eps_%s/variance_b.txt"%(veri_directory,image_num,max_eps), var)
|
{"hexsha": "4ad9049daa7bef929a189b4d1555c2781267cf0f", "size": 9368, "ext": "py", "lang": "Python", "max_stars_repo_path": "MNIST/RobustnessTest.py", "max_stars_repo_name": "matthewwicker/StatisticalGuarenteesForBNNs", "max_stars_repo_head_hexsha": "1f585636c152b8489e331641c743ff628c2b7cc7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-03-09T21:31:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T13:51:09.000Z", "max_issues_repo_path": "MNIST/RobustnessTest.py", "max_issues_repo_name": "matthewwicker/StatisticalGuarenteesForBNNs", "max_issues_repo_head_hexsha": "1f585636c152b8489e331641c743ff628c2b7cc7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MNIST/RobustnessTest.py", "max_forks_repo_name": "matthewwicker/StatisticalGuarenteesForBNNs", "max_forks_repo_head_hexsha": "1f585636c152b8489e331641c743ff628c2b7cc7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7559322034, "max_line_length": 153, "alphanum_fraction": 0.6649231426, "include": true, "reason": "import numpy,from scipy,from statsmodels", "num_tokens": 2534}
|
import numpy as np
class Transform:
    """
    Positional data for an object, Magnebot, body part, etc.
    """
    def __init__(self, position: np.ndarray, rotation: np.ndarray, forward: np.ndarray) -> None:
        """
        :param position: The position vector of the object as a numpy array.
        :param rotation: The rotation quaternion of the object as a numpy array.
        :param forward: The forward directional vector of the object as a numpy array.
        """
        # Annotations fixed: `np.array` is a factory function, not a type;
        # `np.ndarray` is the correct annotation for array arguments.
        """:field
        The position vector of the object as a numpy array: `[x, y, z]` The position of each object is the bottom-center point of the object. The position of each Magnebot body part is in the exact center of the body part. `y` is the up direction.
        """
        self.position = position
        """:field
        The rotation quaternion of the object as a numpy array: `[x, y, z, w]` See: [`tdw.tdw_utils.QuaternionUtils`](https://github.com/threedworld-mit/tdw/blob/master/Documentation/python/tdw_utils.md#quaternionutils).
        """
        self.rotation = rotation
        """:field
        The forward directional vector of the object as a numpy array: `[x, y, z]`
        """
        self.forward = forward
|
{"hexsha": "d4c823a8bde45ea8c34e59f11709e32811830ecd", "size": 1209, "ext": "py", "lang": "Python", "max_stars_repo_path": "magnebot/transform.py", "max_stars_repo_name": "neuroailab/magnebot", "max_stars_repo_head_hexsha": "3f537fcd95685efeadf7200208a310a4c6a2f10c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "magnebot/transform.py", "max_issues_repo_name": "neuroailab/magnebot", "max_issues_repo_head_hexsha": "3f537fcd95685efeadf7200208a310a4c6a2f10c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "magnebot/transform.py", "max_forks_repo_name": "neuroailab/magnebot", "max_forks_repo_head_hexsha": "3f537fcd95685efeadf7200208a310a4c6a2f10c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.6896551724, "max_line_length": 247, "alphanum_fraction": 0.6451612903, "include": true, "reason": "import numpy", "num_tokens": 286}
|
cc ------------ dpmjet3.4 - authors: S.Roesler, R.Engel, J.Ranft -------
cc -------- phojet1.12-40 - authors: S.Roesler, R.Engel, J.Ranft -------
cc - oct'13 -------
cc ----------- pythia-6.4 - authors: Torbjorn Sjostrand, Lund'10 -------
cc ---------------------------------------------------------------------
cc converted for use with FLUKA -------
cc - oct'13 -------
C...PYINIT
C...Initializes the generation procedure; finds maxima of the
C...differential cross-sections to be used for weighting.
      SUBROUTINE PYINIT(FRAME,BEAM,TARGET,WIN)
C...FRAME, BEAM and TARGET are character strings selecting the frame of
C...the process and the two incoming particles; WIN is the associated
C...energy (its interpretation depends on FRAME).
C...Double precision and integer declarations.
      IMPLICIT DOUBLE PRECISION(A-H, O-Z)
      IMPLICIT INTEGER(I-N)
C...Commonblocks.
      include 'inc/pydat1'
      include 'inc/pydat2'
      include 'inc/pydat3'
      include 'inc/pydat4'
      include 'inc/pysubs'
      include 'inc/pypars'
      include 'inc/pyint1'
      include 'inc/pyint2'
      include 'inc/pyint5'
      include 'inc/pypued'
C...Local arrays and character variables.
      DIMENSION ALAMIN(20),NFIN(20)
      CHARACTER*(*) FRAME,BEAM,TARGET
      CHARACTER CHFRAM*12,CHBEAM*12,CHTARG*12,CHLH(2)*6
C...Interface to PDFLIB.
      include 'inc/w50511'
      include 'inc/w50512'
      DOUBLE PRECISION VALUE(20)
      CHARACTER*20 PARM(20)
*
      EXTERNAL PYDATA
*
      DATA VALUE/20*0D0/,PARM/20*' '/
C...Data:Lambda and n_f values for parton distributions..
      DATA ALAMIN/0.177D0,0.239D0,0.247D0,0.2322D0,0.248D0,0.248D0,
     &0.192D0,0.326D0,2*0.2D0,0.2D0,0.2D0,0.29D0,0.2D0,0.4D0,5*0.2D0/,
     &NFIN/20*4/
      DATA CHLH/'lepton','hadron'/
C...Check that BLOCK DATA PYDATA has been loaded.
      CALL PYCKBD
C...Reset MINT and VINT arrays. Write headers.
      MSTI(53)=0
      DO 100 J=1,400
        MINT(J)=0
        VINT(J)=0D0
  100 CONTINUE
      IF(MSTU(12).NE.12345) CALL PYLIST(0)
      IF(MSTP(122).GE.1) WRITE(MSTU(11),5100)
C...Reset error counters.
      MSTU(23)=0
      MSTU(27)=0
      MSTU(30)=0
C...Reset processes that should not be on.
      MSUB(96)=0
      MSUB(97)=0
C...Select global FSR/ISR/UE parameter set = 'tune'
C...See routine PYTUNE for details
      IF (MSTP(5).NE.0) THEN
        MSTP5=MSTP(5)
        CALL PYTUNE(MSTP5)
      ENDIF
C...Call user process initialization routine.
      IF(FRAME(1:1).EQ.'u'.OR.FRAME(1:1).EQ.'U') THEN
        MSEL=0
        CALL UPINIT
        MSEL=0
      ENDIF
C...Maximum 4 generations; set maximum number of allowed flavours.
      MSTP(1)=MIN(4,MSTP(1))
      MSTU(114)=MIN(MSTU(114),2*MSTP(1))
      MSTP(58)=MIN(MSTP(58),2*MSTP(1))
C...Sum up Cabibbo-Kobayashi-Maskawa factors for each quark/lepton.
C...VINT(180+I) collects the CKM weight for flavour I; leptons get 1.
      DO 120 I=-20,20
        VINT(180+I)=0D0
        IA=ABS(I)
        IF(IA.GE.1.AND.IA.LE.2*MSTP(1)) THEN
          DO 110 J=1,MSTP(1)
            IB=2*J-1+MOD(IA,2)
            IF(IB.GE.6.AND.MSTP(9).EQ.0) GOTO 110
            IPM=(5-SIGN(1,I))/2
            IDC=J+MDCY(IA,2)+2
            IF(MDME(IDC,1).EQ.1.OR.MDME(IDC,1).EQ.IPM) VINT(180+I)=
     &      VINT(180+I)+VCKM((IA+1)/2,(IB+1)/2)
  110     CONTINUE
        ELSEIF(IA.GE.11.AND.IA.LE.10+2*MSTP(1)) THEN
          VINT(180+I)=1D0
        ENDIF
  120 CONTINUE
C...Initialize parton distributions: PDFLIB.
      IF(MSTP(52).EQ.2) THEN
        PARM(1)='NPTYPE'
        VALUE(1)=1
        PARM(2)='NGROUP'
        VALUE(2)=MSTP(51)/1000
        PARM(3)='NSET'
        VALUE(3)=MOD(MSTP(51),1000)
        PARM(4)='TMAS'
        VALUE(4)=PMAS(6,1)
        CALL PDFSET(PARM,VALUE)
        MINT(93)=1000000+MSTP(51)
      ENDIF
C...Choose Lambda value to use in alpha-strong.
      MSTU(111)=MSTP(2)
      IF(MSTP(3).GE.2) THEN
        ALAM=0.2D0
        NF=4
        IF(MSTP(52).EQ.1.AND.MSTP(51).GE.1.AND.MSTP(51).LE.20) THEN
          ALAM=ALAMIN(MSTP(51))
          NF=NFIN(MSTP(51))
        ELSEIF(MSTP(52).EQ.2.AND.NFL.EQ.5) THEN
          ALAM=QCDL5
          NF=5
        ELSEIF(MSTP(52).EQ.2) THEN
          ALAM=QCDL4
          NF=4
        ENDIF
        PARP(1)=ALAM
        PARP(61)=ALAM
        PARP(72)=ALAM
        PARU(112)=ALAM
        MSTU(112)=NF
        IF(MSTP(3).EQ.3) PARJ(81)=ALAM
      ENDIF
C...Initialize the UED masses and widths
      IF (IUED(1).EQ.1) CALL PYXDIN
C...Initialize the SUSY generation: couplings, masses,
C...decay modes, branching ratios, and so on.
      CALL PYMSIN
C...Initialize widths and partial widths for resonances.
      CALL PYINRE
C...Set Z0 mass and width for e+e- routines.
      PARJ(123)=PMAS(23,1)
      PARJ(124)=PMAS(23,2)
C...Identify beam and target particles and frame of process.
      CHFRAM=FRAME//' '
      CHBEAM=BEAM//' '
      CHTARG=TARGET//' '
      CALL PYINBM(CHFRAM,CHBEAM,CHTARG,WIN)
      IF(MINT(65).EQ.1) GOTO 170
C...For gamma-p or gamma-gamma allow many (3 or 6) alternatives.
C...For e-gamma allow 2 alternatives.
C...MINT(121) = number of photon-physics alternatives to loop over below.
      MINT(121)=1
      IF(MSTP(14).EQ.10.AND.(MSEL.EQ.1.OR.MSEL.EQ.2)) THEN
        IF((MINT(11).EQ.22.OR.MINT(12).EQ.22).AND.
     &  (ABS(MINT(11)).GT.100.OR.ABS(MINT(12)).GT.100)) MINT(121)=3
        IF(MINT(11).EQ.22.AND.MINT(12).EQ.22) MINT(121)=6
        IF((MINT(11).EQ.22.OR.MINT(12).EQ.22).AND.
     &  (ABS(MINT(11)).EQ.11.OR.ABS(MINT(12)).EQ.11)) MINT(121)=2
      ELSEIF(MSTP(14).EQ.20.AND.(MSEL.EQ.1.OR.MSEL.EQ.2)) THEN
        IF((MINT(11).EQ.22.OR.MINT(12).EQ.22).AND.
     &  (ABS(MINT(11)).GT.100.OR.ABS(MINT(12)).GT.100)) MINT(121)=3
        IF(MINT(11).EQ.22.AND.MINT(12).EQ.22) MINT(121)=9
      ELSEIF(MSTP(14).EQ.25.AND.(MSEL.EQ.1.OR.MSEL.EQ.2)) THEN
        IF((MINT(11).EQ.22.OR.MINT(12).EQ.22).AND.
     &  (ABS(MINT(11)).GT.100.OR.ABS(MINT(12)).GT.100)) MINT(121)=2
        IF(MINT(11).EQ.22.AND.MINT(12).EQ.22) MINT(121)=4
      ELSEIF(MSTP(14).EQ.30.AND.(MSEL.EQ.1.OR.MSEL.EQ.2)) THEN
        IF((MINT(11).EQ.22.OR.MINT(12).EQ.22).AND.
     &  (ABS(MINT(11)).GT.100.OR.ABS(MINT(12)).GT.100)) MINT(121)=4
        IF(MINT(11).EQ.22.AND.MINT(12).EQ.22) MINT(121)=13
      ENDIF
      MINT(123)=MSTP(14)
      IF((MSTP(14).EQ.10.OR.MSTP(14).EQ.20.OR.MSTP(14).EQ.25.OR.
     &MSTP(14).EQ.30).AND.MSEL.NE.1.AND.MSEL.NE.2) MINT(123)=0
      IF(MSTP(14).GE.11.AND.MSTP(14).LE.19) THEN
        IF(MSTP(14).EQ.11) MINT(123)=0
        IF(MSTP(14).EQ.12.OR.MSTP(14).EQ.14) MINT(123)=5
        IF(MSTP(14).EQ.13.OR.MSTP(14).EQ.17) MINT(123)=6
        IF(MSTP(14).EQ.15) MINT(123)=2
        IF(MSTP(14).EQ.16.OR.MSTP(14).EQ.18) MINT(123)=7
        IF(MSTP(14).EQ.19) MINT(123)=3
      ELSEIF(MSTP(14).GE.21.AND.MSTP(14).LE.24) THEN
        IF(MSTP(14).EQ.21) MINT(123)=0
        IF(MSTP(14).EQ.22.OR.MSTP(14).EQ.23) MINT(123)=4
        IF(MSTP(14).EQ.24) MINT(123)=1
      ELSEIF(MSTP(14).GE.26.AND.MSTP(14).LE.29) THEN
        IF(MSTP(14).EQ.26.OR.MSTP(14).EQ.28) MINT(123)=8
        IF(MSTP(14).EQ.27.OR.MSTP(14).EQ.29) MINT(123)=9
      ENDIF
C...Set up kinematics of process.
      CALL PYINKI(0)
C...Set up kinematics for photons inside leptons.
      IF(MINT(141).NE.0.OR.MINT(142).NE.0) CALL PYGAGA(1,WTGAGA)
C...Precalculate flavour selection weights.
      CALL PYKFIN
C...Loop over gamma-p or gamma-gamma alternatives.
C...CKIN(3) is saved here and restored at the top of each alternative,
C...since it may be changed within an iteration of the loop.
      CKIN3=CKIN(3)
C...MSAV48 accumulates the number of switched-on subprocesses over all
C...alternatives, checked after the loop.
      MSAV48=0
      DO 160 IGA=1,MINT(121)
        CKIN(3)=CKIN3
        MINT(122)=IGA
C...Select partonic subprocesses to be included in the simulation.
        CALL PYINPR
        MINT(101)=1
        MINT(102)=1
        MINT(103)=MINT(11)
        MINT(104)=MINT(12)
C...Count number of subprocesses on.
        MINT(48)=0
        DO 130 ISUB=1,500
          IF(MINT(50).EQ.0.AND.ISUB.GE.91.AND.ISUB.LE.96.AND.
     &    MSUB(ISUB).EQ.1.AND.MINT(121).GT.1) THEN
            MSUB(ISUB)=0
          ELSEIF(MINT(50).EQ.0.AND.ISUB.GE.91.AND.ISUB.LE.96.AND.
     &    MSUB(ISUB).EQ.1) THEN
            WRITE(MSTU(11),5200) ISUB,CHLH(MINT(41)),CHLH(MINT(42))
            CALL PYSTOP(1)
          ELSEIF(MSUB(ISUB).EQ.1.AND.ISET(ISUB).EQ.-1) THEN
            WRITE(MSTU(11),5300) ISUB
            CALL PYSTOP(1)
          ELSEIF(MSUB(ISUB).EQ.1.AND.ISET(ISUB).LE.-2) THEN
            WRITE(MSTU(11),5400) ISUB
            CALL PYSTOP(1)
          ELSEIF(MSUB(ISUB).EQ.1) THEN
            MINT(48)=MINT(48)+1
          ENDIF
  130   CONTINUE
C...Stop or raise warning flag if no subprocesses on.
        IF(MINT(121).EQ.1.AND.MINT(48).EQ.0) THEN
          IF(MSTP(127).NE.1) THEN
            WRITE(MSTU(11),5500)
            CALL PYSTOP(1)
          ELSE
            WRITE(MSTU(11),5700)
            MSTI(53)=1
          ENDIF
        ENDIF
        MINT(49)=MINT(48)-MSUB(91)-MSUB(92)-MSUB(93)-MSUB(94)
        MSAV48=MSAV48+MINT(48)
C...Reset variables for cross-section calculation.
        DO 150 I=0,500
          DO 140 J=1,3
            NGEN(I,J)=0
            XSEC(I,J)=0D0
  140     CONTINUE
  150   CONTINUE
C...Find parametrized total cross-sections.
        CALL PYXTOT
        VINT(318)=VINT(317)
C...Maxima of differential cross-sections.
        IF(MSTP(121).LE.1) CALL PYMAXI
C...Initialize possibility of pileup events.
        IF(MINT(121).GT.1) MSTP(131)=0
        IF(MSTP(131).NE.0) CALL PYPILE(1)
C...Initialize multiple interactions with variable impact parameter.
        IF(MINT(50).EQ.1) THEN
          PTMN=PARP(82)*(VINT(1)/PARP(89))**PARP(90)
          IF(MOD(MSTP(81),10).EQ.0.AND.(CKIN(3).GT.PTMN.OR.
     &    ((MSEL.NE.1.AND.MSEL.NE.2)))) MSTP(82)=MIN(1,MSTP(82))
          IF((MINT(49).NE.0.OR.MSTP(131).NE.0).AND.MSTP(82).GE.2) THEN
            MINT(35)=1
            CALL PYMULT(1)
            MINT(35)=3
            CALL PYMIGN(1)
          ENDIF
        ENDIF
C...Save results for gamma-p and gamma-gamma alternatives.
        IF(MINT(121).GT.1) CALL PYSAVE(1,IGA)
  160 CONTINUE
C...Initialization finished.
      IF(MSAV48.EQ.0) THEN
        IF(MSTP(127).NE.1) THEN
          WRITE(MSTU(11),5500)
          CALL PYSTOP(1)
        ELSE
          WRITE(MSTU(11),5700)
          MSTI(53)=1
        ENDIF
      ENDIF
  170 IF(MSTP(122).GE.1) WRITE(MSTU(11),5600)
C...Formats for initialization information.
 5100 FORMAT('1',18('*'),1X,'PYINIT: initialization of PYTHIA ',
     &'routines',1X,17('*'))
 5200 FORMAT(1X,'Error: process number ',I3,' not meaningful for ',A6,
     &'-',A6,' interactions.'/1X,'Execution stopped!')
 5300 FORMAT(1X,'Error: requested subprocess',I4,' not implemented.'/
     &1X,'Execution stopped!')
 5400 FORMAT(1X,'Error: requested subprocess',I4,' not existing.'/
     &1X,'Execution stopped!')
 5500 FORMAT(1X,'Error: no subprocess switched on.'/
     &1X,'Execution stopped.')
 5600 FORMAT(/1X,22('*'),1X,'PYINIT: initialization completed',1X,
     &22('*'))
 5700 FORMAT(1X,'Error: no subprocess switched on.'/
     &1X,'Execution will stop if you try to generate events.')
      RETURN
      END
|
{"hexsha": "7005f2f086d353baaa983d9e44f4f94921625702", "size": 10804, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/pythia/pyinit.f", "max_stars_repo_name": "pzhristov/DPMJET", "max_stars_repo_head_hexsha": "946e001290ca5ece608d7e5d1bfc7311cda7ebaa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-06-15T01:59:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-01T08:39:13.000Z", "max_issues_repo_path": "src/pythia/pyinit.f", "max_issues_repo_name": "pzhristov/DPMJET", "max_issues_repo_head_hexsha": "946e001290ca5ece608d7e5d1bfc7311cda7ebaa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-15T09:53:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T20:52:28.000Z", "max_forks_repo_path": "src/pythia/pyinit.f", "max_forks_repo_name": "pzhristov/DPMJET", "max_forks_repo_head_hexsha": "946e001290ca5ece608d7e5d1bfc7311cda7ebaa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-07-05T02:44:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-20T20:49:05.000Z", "avg_line_length": 32.6404833837, "max_line_length": 72, "alphanum_fraction": 0.5657164013, "num_tokens": 4084}
|
module PointCloud
using jInv.Mesh
using jInv.Utils
using jInv.InverseSolve
using ShapeReconstructionPaLS.Utils
using ShapeReconstructionPaLS.ParamLevelSet
using MAT
using SparseArrays
using Distributed
import jInv.ForwardShare.getData
import jInv.ForwardShare.getSensTMatVec
import jInv.ForwardShare.getSensMatVec
import jInv.ForwardShare.ForwardProbType
## The rotation planes are:
# first rotation (theta) is for planes x1 and x2
# second rotation (phi) is for planes x1 and x3
# Sampling is done by summing planes of x1 and x2 for all values of x3.
export PointCloudParam, getPointCloudParam
# Forward-problem description for point-cloud shape reconstruction.
mutable struct PointCloudParam <: ForwardProbType
	Mesh          :: RegularMesh
	P             :: Array{Array{Float64,2}}  # point clouds
	Normals       :: Array{Array{Float64,2}}  # per-point normals
	npcAll        :: Int64                    # total number of point clouds
	workerSubIdxs                             # indices of the clouds owned by this worker
	theta_phi_rad :: Array{Float64,2}         # rotation angles (radians)
	b             :: Array{Float64,2}
	method        :: String
	Jacobian      :: SparseMatrixCSC{Float32,Int32}
end
# Internal constructor: builds a PointCloudParam with an empty Jacobian.
function getPointCloudParamInternal(Mesh::RegularMesh, P::Array{Array{Float64,2}}, Normals::Array{Array{Float64,2}}, npcAll::Int64, workerSubIdxs, theta_phi_rad::Array{Float64,2}, b::Array{Float64,2}, method)
	return PointCloudParam(Mesh, P, Normals, npcAll, workerSubIdxs, theta_phi_rad, b, method, spzeros(Float32,Int32,0,0));
end
# Distributes the point-cloud forward problem over up to `numWorkers` workers.
# Returns an array of RemoteChannels (jInv's parallel mechanism), even for
# numWorkers == 1.
function getPointCloudParam(Mesh::RegularMesh,P::Array{Array{Float64,2}}, Normals::Array{Array{Float64,2}},theta_phi_rad::Array{Float64,2} ,b::Array{Float64,2},numWorkers::Int64,method = MATBased)
	if numWorkers > nworkers()
		numWorkers = nworkers();
	end
	SourcesSubInd = Array{Array{Int64,1}}(undef,numWorkers);
	ActualWorkers = workers();
	if numWorkers < nworkers()
		ActualWorkers = ActualWorkers[1:numWorkers];
	end
	pFor = Array{RemoteChannel}(undef,numWorkers);
	i = 1; nextidx() = (idx=i; i+=1; idx)
	idx=i;
	npc = 2
	# send out jobs
	@sync begin
		for w = ActualWorkers
			@async begin
				while true
					idx = nextidx();
					if idx > numWorkers
						break
					end
					I_p = getIndicesOfKthWorker(numWorkers,idx,npc);
					# BUGFIX: store each worker's channel in its own slot
					# (previously every iteration wrote pFor[1], leaving the
					# other slots undefined), and create the remote channel
					# only once (it was built twice per iteration).
					pFor[idx] = initRemoteChannel(getPointCloudParamInternal,w,Mesh::RegularMesh,P::Array{Array{Float64,2}}, Normals::Array{Array{Float64,2}},npc,I_p,theta_phi_rad::Array{Float64,2} ,b::Array{Float64,2},method);
					wait(pFor[idx]);
				end
			end
		end
	end
	return pFor # Array of Remote Refs
end
import jInv.Utils.clear!
# Frees the memory-heavy parts of a PointCloudParam.
function clear!(pFor::PointCloudParam)
	# BUGFIX: PointCloudParam has no SampleMat field (assigning it threw an
	# error); reset the stored Jacobian to an empty sparse matrix instead.
	pFor.Jacobian = spzeros(Float32,Int32,0,0);
	clear!(pFor.Mesh);
	return pFor;
end
include("prepareSyntheticPointCloudData.jl");
include("getData.jl");
include("sensitivityFuncs.jl");
end
|
{"hexsha": "715f19ff0f7466674fbebb1a58874618b4bd1576", "size": 2965, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/PointCloud/PointCloud.jl", "max_stars_repo_name": "BGUCompSci/ShapeReconstructionPaLS", "max_stars_repo_head_hexsha": "725cfa2a2ab357b4f2ed564eb2227158efc07f7f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-09-26T17:47:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-16T19:13:36.000Z", "max_issues_repo_path": "src/PointCloud/PointCloud.jl", "max_issues_repo_name": "BGUCompSci/ShapeReconstructionPaLS", "max_issues_repo_head_hexsha": "725cfa2a2ab357b4f2ed564eb2227158efc07f7f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-12-10T17:52:57.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-10T17:52:57.000Z", "max_forks_repo_path": "src/PointCloud/PointCloud.jl", "max_forks_repo_name": "BGUCompSci/ShapeReconstructionPaLS", "max_forks_repo_head_hexsha": "725cfa2a2ab357b4f2ed564eb2227158efc07f7f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-03T02:39:16.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-03T02:39:16.000Z", "avg_line_length": 35.2976190476, "max_line_length": 216, "alphanum_fraction": 0.7403035413, "num_tokens": 907}
|
## Automatically adapted for numpy.oldnumeric Jun 27, 2008 by -c
#
# Copyright (C) 2000-2008 greg Landrum
#
""" Training algorithms for feed-forward neural nets
Unless noted otherwise, algorithms and notation are taken from:
"Artificial Neural Networks: Theory and Applications",
Dan W. Patterson, Prentice Hall, 1996
"""
from __future__ import print_function
import numpy
from rdkit.six.moves import xrange
class Trainer(object):
  """Abstract ("virtual") base class that all network trainers derive from."""
class BackProp(Trainer):
  """implement back propagation (algorithm on pp 153-154 of Patterson)
  I don't *think* that I've made any assumptions about the connectivity of
  the net (i.e. full connectivity between layers is not required).
  **NOTE:** this code is currently making the assumption that the activation
  functions on the nodes in the network are capable of calculating their
  derivatives using only their values (i.e. a DerivFromVal method should
  exist). This shouldn't be too hard to change.
  """

  def StepUpdate(self, example, net, resVect=None):
    """ does a BackProp step based upon the example
    **Arguments**
      - example: a 2-tuple:
        1) a list of variable values values
        2) a list of result values (targets)
      - net: a _Network_ (or something supporting the same API)
      - resVect: if this is nonzero, then the network is not required to
        classify the _example_
    **Returns**
      the backprop error from _network_ **before the update**
    **Note**
      In case it wasn't blindingly obvious, the weights in _network_ are modified
      in the course of taking a backprop step.
    """
    totNumNodes = net.GetNumNodes()
    # Lazily allocate the momentum buffer on first use (currently only
    # allocated, never read: momentum is not yet implemented).
    if self.oldDeltaW is None:
      self.oldDeltaW = numpy.zeros(totNumNodes, numpy.float64)
    outputNodeList = net.GetOutputNodeList()
    nOutput = len(outputNodeList)
    # The last nOutput entries of the example are the targets; the rest
    # are the input variables.
    targetVect = numpy.array(example[-nOutput:], numpy.float64)
    trainVect = example[:-nOutput]
    if resVect is None:
      # classify the example
      net.ClassifyExample(trainVect)
      resVect = net.GetLastOutputs()
    outputs = numpy.take(resVect, outputNodeList)
    errVect = targetVect - outputs
    # delta[i] accumulates the error term for node i across the whole net.
    delta = numpy.zeros(totNumNodes, numpy.float64)
    # start with the output layer
    for i in xrange(len(outputNodeList)):
      idx = outputNodeList[i]
      node = net.GetNode(idx)
      # the deltas here are easy
      delta[idx] = errVect[i] * node.actFunc.DerivFromVal(resVect[idx])
      # use these results to start working on the deltas of the preceding layer
      inputs = node.GetInputs()
      weights = delta[idx] * node.GetWeights()
      for j in xrange(len(inputs)):
        idx2 = inputs[j]
        delta[idx2] = delta[idx2] + weights[j]
    # now propagate the deltas backwards
    for layer in xrange(net.GetNumHidden() - 1, -1, -1):
      nodesInLayer = net.GetHiddenLayerNodeList(layer)
      for idx in nodesInLayer:
        node = net.GetNode(idx)
        # start by finishing off the error term for this guy
        delta[idx] = delta[idx] * node.actFunc.DerivFromVal(resVect[idx])
        # and then propagate our errors to the preceding layer
        if layer != 0:
          inputs = node.GetInputs()
          weights = delta[idx] * node.GetWeights()
          for i in xrange(len(inputs)):
            idx2 = inputs[i]
            delta[idx2] = delta[idx2] + weights[i]
    # okey dokey... we've now got the deltas for each node, use those
    # to update the weights (whew!)
    nHidden = net.GetNumHidden()
    for layer in xrange(0, nHidden + 1):
      if layer == nHidden:
        idxList = net.GetOutputNodeList()
      else:
        idxList = net.GetHiddenLayerNodeList(layer)
      for idx in idxList:
        node = net.GetNode(idx)
        dW = self.speed * delta[idx] * numpy.take(resVect, node.GetInputs())
        newWeights = node.GetWeights() + dW
        node.SetWeights(newWeights)
    # return the RMS error from the OLD network
    # NOTE(review): despite the comment above, this is the elementwise
    # sqrt of errVect**2 indexed at [0], i.e. |error| of the FIRST output
    # node only — not an RMS over all outputs. Confirm intended behavior
    # before changing (convergence in TrainOnLine depends on it).
    return numpy.sqrt(errVect * errVect)[0]

  def TrainOnLine(self, examples, net, maxIts=5000, errTol=0.1, useAvgErr=1, silent=0):
    """ carries out online training of a neural net
    The definition of online training is that the network is updated after
    each example is presented.
    **Arguments**
      - examples: a list of 2-tuple:
        1) a list of variable values values
        2) a list of result values (targets)
      - net: a _Network_ (or something supporting the same API)
      - maxIts: the maximum number of *training epochs* (see below for definition) to be
        run
      - errTol: the tolerance for convergence
      - useAvgErr: if this toggle is nonzero, then the error at each step will be
        divided by the number of training examples for the purposes of checking
        convergence.
      - silent: controls the amount of visual noise produced as this runs.
    **Note**
      a *training epoch* is one complete pass through all the training examples
    """
    nExamples = len(examples)
    converged = 0
    cycle = 0
    while (not converged) and (cycle < maxIts):
      maxErr = 0
      newErr = 0
      #print('bp: ',cycle)
      for example in examples:
        localErr = self.StepUpdate(example, net)
        newErr += localErr
        if localErr > maxErr:
          maxErr = localErr
      # Convergence criterion: average per-example error, or the worst
      # single-example error, depending on useAvgErr.
      if useAvgErr == 1:
        newErr = newErr / nExamples
      else:
        newErr = maxErr
      #print('\t',newErr,errTol)
      if newErr <= errTol:
        converged = 1
      # if cycle % 10 == 0 and not silent:
      if not silent:
        print('epoch %d, error: % 6.4f' % (cycle, newErr))
      cycle = cycle + 1
    if not silent:
      if converged:
        print('Converged after %d epochs.' % cycle)
      else:
        print('NOT Converged after %d epochs.' % cycle)
      print('final error: % 6.4f' % newErr)

  def __init__(self, speed=0.5, momentum=0.7):
    """ Constructor
    **Arguments**
      - speed: the speed parameter for back prop training
      - momentum: the momentum term for back prop training
        *Not currently used*
    """
    self.speed = speed
    self.momentum = momentum  # stored but not used by StepUpdate (see above)
    self.oldDeltaW = None
if __name__ == '__main__':
  # Self-test / demo harness: trains tiny networks on the classic boolean
  # functions (AND/OR/XOR) and a linear mapping.
  from rdkit.ML.Neural import Network

  def testAnd():
    # 3rd input is a constant bias term; targets use 0.1/0.9 instead of 0/1.
    examples = [[[0, 0, 1], [0.1]], [[0, 1, 1], [.1]], [[1, 0, 1], [.1]], [[1, 1, 1], [.9]]]
    net = Network.Network([3, 1])
    t = BackProp()
    t.TrainOnLine(examples, net)
    return net

  def testOr():
    examples = [[[0, 0, 1], [0.1]], [[0, 1, 1], [.9]], [[1, 0, 1], [.9]], [[1, 1, 1], [.9]]]
    net = Network.Network([3, 1])
    t = BackProp()
    t.TrainOnLine(examples, net, maxIts=1000, useAvgErr=0)
    print('classifications:')
    for example in examples:
      res = net.ClassifyExample(example[0])
      print('%f -> %f' % (example[1][0], res))
    return net

  def testXor():
    # XOR is not linearly separable, so this one needs a hidden layer.
    examples = [[[0, 0, 1], [.1]], [[0, 1, 1], [.9]], [[1, 0, 1], [.9]], [[1, 1, 1], [.1]]]
    net = Network.Network([3, 3, 1])
    t = BackProp(speed=.8)
    t.TrainOnLine(examples, net, errTol=0.2)
    return net

  def testLinear():
    # Single-input examples: example[:-1] is the input, example[-1] the target.
    examples = [
      [.1, .1],
      [.2, .2],
      [.3, .3],
      [.4, .4],
      [.8, .8],
    ]
    net = Network.Network([1, 2, 1])
    t = BackProp(speed=.8)
    t.TrainOnLine(examples, net, errTol=0.1, useAvgErr=0)
    print('classifications:')
    for example in examples:
      res = net.ClassifyExample(example[:-1])
      print('%f -> %f' % (example[-1], res))
    return net

  def runProfile(command):
    # Profile one of the test functions above and print a time-sorted report.
    import random
    random.seed(23)
    import profile, pstats
    datFile = '%s.prof.dat' % (command)
    profile.run('%s()' % command, datFile)
    stats = pstats.Stats(datFile)
    stats.strip_dirs()
    stats.sort_stats('time').print_stats()

  # Toggle between the pickled-XOR demo and the linear demo.
  if 0:
    net = testXor()
    print('Xor:', net)
    from rdkit.six.moves import cPickle
    outF = open('xornet.pkl', 'wb+')
    cPickle.dump(net, outF)
    outF.close()
  else:
    #runProfile('testLinear')
    net = testLinear()
    #net = testOr()
|
{"hexsha": "0459edcf8046da769d854385ea4a34ecdd7ae4a9", "size": 8146, "ext": "py", "lang": "Python", "max_stars_repo_path": "rdkit/ML/Neural/Trainers.py", "max_stars_repo_name": "darkreactions/rdkit", "max_stars_repo_head_hexsha": "0c388029c1f9386d832f6c321e59a11589c373d8", "max_stars_repo_licenses": ["PostgreSQL"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rdkit/ML/Neural/Trainers.py", "max_issues_repo_name": "darkreactions/rdkit", "max_issues_repo_head_hexsha": "0c388029c1f9386d832f6c321e59a11589c373d8", "max_issues_repo_licenses": ["PostgreSQL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rdkit/ML/Neural/Trainers.py", "max_forks_repo_name": "darkreactions/rdkit", "max_forks_repo_head_hexsha": "0c388029c1f9386d832f6c321e59a11589c373d8", "max_forks_repo_licenses": ["PostgreSQL"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-12-04T02:28:18.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-29T01:18:46.000Z", "avg_line_length": 29.8388278388, "max_line_length": 92, "alphanum_fraction": 0.6135526639, "include": true, "reason": "import numpy", "num_tokens": 2226}
|
\documentclass[]{article}
%opening
\title{Learning Neural Question Answering Systems for Low-resource Languages}
\author{C.W, R.H}
\usepackage{graphicx}
\begin{document}
\begin{titlepage} % Suppresses headers and footers on the title page
\centering % Centre everything on the title page
%------------------------------------------------
% Top rules
%------------------------------------------------
\rule{\textwidth}{1pt} % Thick horizontal rule
\vspace{2pt}\vspace{-\baselineskip} % Whitespace between rules
\rule{\textwidth}{0.4pt} % Thin horizontal rule
\vspace{0.1\textheight} % Whitespace between the top rules and title
%------------------------------------------------
% Title
%------------------------------------------------
{\Huge Monash University}\\[0.5\baselineskip] % Title line 1
\vspace{0.1\textheight}
{\Large Learning Neural Question Answering Systems for Low-resource Languages}\\[0.5\baselineskip] % Title line 1
\vspace{0.025\textheight} % Whitespace between the title and short horizontal rule
\rule{0.3\textwidth}{0.4pt} % Short horizontal rule under the title
\vspace{0.15\textheight} % Whitespace between the thin horizontal rule and the author name
%------------------------------------------------
% Author
%------------------------------------------------
{This thesis is presented in partial fulfillment of the requirements for the degree of Bachelor of Information Technology (Honours) at Monash University}
\vspace{0.05\textheight}
{By:}\\
\vspace{0.01\textheight}
{\Large \textsc{Chenyang Wang}} % Author name
\vspace{0.05\textheight}
{Supervisor:}\\
\vspace{0.01\textheight}
{\Large \textsc{Reza Haffari}} % Author name
\vspace{0.05\textheight}
{Year:}\\
\vspace{0.01\textheight}
{\Large \textsc{2017}}
\vfill % Whitespace between the author name and publisher
%------------------------------------------------
% Bottom rules
%------------------------------------------------
\rule{\textwidth}{0.4pt} % Thin horizontal rule
\vspace{2pt}\vspace{-\baselineskip} % Whitespace between rules
\rule{\textwidth}{1pt} % Thick horizontal rule
\end{titlepage}
\newpage
\begin{abstract}
Neural NLP in question answering has seen plenty of development in recent years, yet training data availability is still the main bottleneck of developing new neural QA systems. This is especially true for low-resource languages where dataset availability is highly limited. In this research we explore options to perform transfer learning from a resource-rich language to a resource-constrained language, including word embedding alignment, model fine-tuning, joint training and inducing shared representations with adversarial training. We evaluate the cross-lingual transfer learning performance of a combination of these methods on an English/Spanish bilingual question answering dataset. We found that for the QA problem being studied, fine-tuning with embedding alignment provides a significant cross-lingual performance gain, and an adversarial training-based method also provides a reasonable performance gain over monolingual models.
\end{abstract}
\newpage
\section*{Declaration}
I declare that this thesis is my own work and has not been submitted in any form for another degree or diploma at any university or other institute of tertiary education. Information derived from the work of others has been acknowledged.
\newline\newline\newline
Signed by \space \includegraphics[width=7em]{media/sig.png}
\newline\newline\newline
Chenyang Wang
\newline
\date{11/09/2017}
\newpage
\tableofcontents
\newpage
\section{Introduction}
Text understanding and question answering have always been among some of the most challenging tasks in natural language processing. With the rise of deep learning and DNN-based learning algorithms, as well as the increasing availability of large training datasets, neural network-based QA has been steadily gaining traction. A competent question answering system has a wide variety of practical uses in areas such as automated online self-service, intelligent web search, AI personal assistants etc. In recent years, many influential models have been proposed in this field, such as Memory Networks \cite{sukhbaatar2015end}, Dynamic Memory Networks \cite{kumar2016ask} and Differentiable Neural Computers (DNC) \cite{graves2016hybrid}. These models are capable of tackling a large range of problem types, such as text comprehension, simple logical reasoning, and even graph-based reasoning. These systems are often capable of delivering much-improved performances compared to older traditional NLP-based systems. However, just like other deep learning-based solutions, the training of these systems requires a large amount of input data. This is challenging in many NLP-problems, including question answering, because labelled datasets for a given task are usually still limited in both quality and quantity, especially when human annotation is required. This problem is even more profound when developing models for languages without many existing special-purpose datasets. For tasks such as text comprehension, there is a dataset bottleneck for training models in most languages other than English. This is challenging for anyone who wishes to apply the latest research in neural question answering (and neural NLP in general) to these languages. 
However, considering that there exist some common linguistic features among different languages and the same task in different languages may also share some similarities, it inspires us to consider the possibility of transferring the knowledge learned on a resource-rich language (such as English) to relatively resource-poor languages so that we can train a higher-performance model with only limited data in the target language. In this research, we analyse the difficulties of training a QA system on low-resource languages, propose two different approaches to transfer learning, examine their effectiveness and discuss their implications. We found that for a simple text comprehension task, using a pre-trained English model with fine-tuning on the target language and alignment of word embeddings achieves a significant performance increase for the target language, while not requiring a large special-purpose dataset in the target language. This indicates that it is indeed possible to use relatively cheap cross-lingual transfer learning to assist the training of QA models in low-resource languages.
\section{Background}
Natural language question answering has been an active research field since the 1960s. Over the years, the goal and scope of the problem has changed several times depending on the target use case and technical capabilities of the time. Hirschman and Gaizauskas \cite{hirschman2001natural} define a question answering system as one that “allows a user to ask a question in everyday language and receive an answer quickly and succinctly, with sufficient context to validate the answer.” Andrenucci and Sneiders \cite{andrenucci2005automated} define the problem as “the process of retrieving precise answers to natural language (NL) questions”. These definitions fit several different sub-problems in QA research, such as:
\begin{itemize}
\item
Natural language QA frontend for databases / knowledge bases, which focuses on the processing of natural language questions and retrieval of answers stored in structured data
\item
Information retrieval, which focuses on searching for relevant documents from a large collection of documents (such as the task of a web search engine)
\item
Text comprehension, which focuses on answering questions based on facts presented in a natural language form
\item
etc.
\end{itemize}
\subsection{Brief History of Question Answering in NLP}
Some well-known early research on natural language question answering was conducted in the 1960-70s, with limited success in providing a natural language frontend to structured knowledge bases within a narrow domain \cite{hirschman2001natural}.
Early work on text comprehension started in the late 1970s, such as Lehnert’s theory of question answering \cite{lehnert1977conceptual}, which draws comparison between machine text comprehension and human comprehension, and outlines some basic requirements for a machine comprehension system to succeed.
Prior to the rapid improvement of neural network-based NLP solutions, most question answering solutions can be categorised into three groups: NLP-based QA, information retrieval QA and template-based QA \cite{andrenucci2005automated}. The comparison of these techniques, along with earlier database NL frontends and deep learning approaches are shown in table \ref{table:comparison}.
\begin{center}
\begin{table}
\begin{tabular}{| p{2.5cm} || p{2cm} | p{2cm} | l | l |}
\hline
& Input & Knowledge Source & Output & Domain \\
\hline
Early database NL frontends & Semi-structured & Highly structured, limited & Accurate & Narrow \\
\hline
Traditional NLP QA systems & Natural & Structured, limited & Accurate & Narrow \\
\hline
Information Retrieval Techniques & Natural & Unstructured, large, redundant & Low accuracy & Broad \\
\hline
Templates & Structured & Structured & Low accuracy & Narrow \\
\hline
Deep Learning & Natural & Structured or unstructured & Accurate & Data dependent \\
\hline
\end{tabular}
\caption{Comparison of QA systems in NLP (based on \cite{andrenucci2005automated})}\label{table:comparison}
\end{table}
\end{center}
The greatest strength of traditional NLP systems is that they tend to incorporate and exploit research done in linguistics and corpus analysis. As classical theoretical linguistics tends to focus on developing a rule-based model for the human language, it was convenient for early AI researchers to borrow these rules from linguistics and apply them to NLP tasks. However, in recent years, much of NLP research has shifted to statistical and machine learning-based approaches.
There are several limitations to the traditional NLP workflow for question answering, such as:
\begin{itemize}
\item
Each NLP module is usually designed individually for their specific roles, not for working together with other modules in a system. For instance, a module at the front of the workflow cannot easily adjust its output to provide more useful output for a module later in the workflow.
\item
The architecture of the system must be designed by experts in linguistics and NLP, yet one system cannot be easily adapted to perform a different task. This limits the viability of such systems in production use.
\item
Such a system mostly derives its language-related knowledge from pre-defined rulesets and language models rather than discovering the structure in the input documents.
\end{itemize}
\begin{center}
\begin{figure}
\includegraphics[width=\linewidth]{media/tradi.png}
\caption{Traditional NLP stack (from \cite{andrenucci2005automated})}
\label{fig:tradi}
\end{figure}
\end{center}
\subsection{Problem Formulation}
The focus of this research is question answering in the context of text comprehension. We formulate the definition of the problem as follows:
\begin{center}
\textit{Given one or more \textbf{natural language documents} containing a number of \textbf{facts} and a natural language \textbf{question}, find relevant facts in the input documents, perform necessary reasoning over the facts, and present the \textbf{answer} in either structured or natural language format.}
\end{center}
The above definition requires a QA system to be able to take natural language information sources and queries as input and perform at least simple logical inferences to extract the answer desired. Unlike information retrieval tasks, in text comprehension tasks, we usually restrict the allowed information sources to the input documents only.
In the context of cross-lingual transfer learning, we wish to solve this problem in one language, and use the knowledge learned in the source language to assist the training of a model on a target language, using documents and Q\&A pairs from both the source and target language.
We may simplify the transfer learning procedure as follows:
Let $L_1$, $L_2$ be two languages, $C_1$, $C_2$ be documents from the two languages respectively, $Q_1$, $Q_2$ be the questions and $A_1$, $A_2$ be the answers.
We learn a model on $L_1$:
\[ M_1(C_1, Q_1) \to A_1\]
Then we use $M_1$ and the $L_2$ data to learn a model in $L_2$, taking into account the shared knowledge with the same task in $L_1$:
\[ M_2(C_2, Q_2 \mid M_1) \to A_2\]
In later chapters we will also consider the cases where $M_1$ and $M_2$ are combined in the same model.
\section{Current Research in Neural QA}
While the traditional NLP systems often employ a rule-based system for parsing inputs and generating outputs, recent neural network-based NLP systems usually attempt to build a distributional model of the language and use that as a basis for solving various problems. The rise of deep neural networks sparked a whole new round of research into natural language processing. Specifically, the effectiveness of recurrent neural networks at sequence processing and the surprising usefulness of embedded word vectors enabled the direct (sometimes even end-to-end) application of neural network-based models in relatively complex NLP tasks such as sentence parsing, transcription, translating as well as question answering. In this section, we discuss the key concepts and techniques used in neural network NLP relevant to text comprehension and multilingual transfer learning.
\subsection{Datasets and Benchmarks}
To compare different question answering techniques as well as to validate the effectiveness of new models, many benchmarking datasets and tools have been developed and adopted by researchers over the years. For instance, the TREC (Text Retrieval Conference) datasets are widely used for information retrieval benchmarking. For text comprehension and reasoning over text data, traditionally there was a lack of large, high-quality datasets suitable for the task. In recent years, several new datasets have been proposed to meet these demands. The three sample datasets mentioned below represent three main types of question answering tasks: text-based reading comprehension (the focus of this research), logical reasoning / inference, and open questions.
\paragraph{CNN / Daily Mail }
The CNN / Daily Mail QA dataset was collected by Hermann, et al. \cite{hermann2015teaching} for developing their deep neural network-based question answerers. The dataset consists of more than 300k articles taken from CNN and Daily Mail websites. Questions on each article are built from the bullet points for these articles. In order to truly test an algorithm for its ability to extract information from the text itself rather than relying on “common sense knowledge” (such as those deduced from word co-occurrence), Hermann et al. also anonymised the named entities in the corpora.
In Chen, et al. \cite{chen2016thorough}, this dataset is studied for its effectiveness in evaluating text comprehension models. The authors conclude that this dataset is valuable for training QA models, however it has several limitations, such as noisy data, relatively simple reasoning tasks and limited room of improvement for future models. However, since it is relatively easy to construct similar datasets in a different language using the same data collection and preprocessing techniques (no human annotation needed), whereas many more recent, higher quality models are much more difficult to replicate in different languages, we believe this dataset is still valuable for developing cross-lingual transfer learning models.
\paragraph{bAbI}
The bAbI dataset was constructed by Weston, et al. \cite{weston2015towards} specifically for evaluating a model’s ability to reason over natural language evidences. The dataset generally consists of “stories” in which a set of simple statements are followed by a question based on previous statements. There are a total of 20 different categories of tasks, varying in the number of evidences needed for each question and the type of reasoning required. A sample of the bAbI data is given in figure \ref{fig:babi}.
\paragraph{TriviaQA}
TriviaQA \cite{joshi2017triviaqa} is a new dataset for question answering designed to overcome many of the shortcomings of older datasets, such as dataset scale, evidence type (i.e. type of reasoning required), syntactical variation, vocabulary size, etc. It is constructed by combining trivia questions with supplementary evidence documents collected from web searches and wiki pages. The quality and benchmarking effectiveness of this dataset are yet to be further tested.
\begin{center}
\begin{figure}
\fbox{\begin{minipage}{30em}
\begin{itemize}
\item[1] Bill went back to the cinema yesterday.
\item[2] Julie went to the school this morning.
\item[3] Fred went to the park yesterday.
\item[4] Yesterday Julie went to the office.
\item[5] Where was Julie before the school? office
\end{itemize}
\end{minipage}}
\caption{bAbI example (from \cite{hermann2015teaching})}
\label{fig:babi}
\end{figure}
\end{center}
\subsection{Neural Networks and Deep Learning in NLP}
\paragraph{Recurrent Network Architectures}
Recurrent neural networks are neural networks with cycles in its connections. Conceptually, the circular connections are usually unrolled and represented as a connection from the network in time step t-1 to t. This allows the network to pass state representations between time steps and therefore capture long-distance relationships within the input sequence. This is essential for NLP, as medium to long-distance dependencies frequently exist in phrases, sentences, and documents. A few modifications to the basic architecture have been designed to decrease the training difficulty and increase the representation power of RNN, such as LSTM \cite{hochreiter1997long} and GRU \cite{cho2014learning}. Frequently, a bidirectional RNN is used to allow information to flow from the end of the sequence back to the beginning. In QA tasks, these designs are commonly used to encode sentences and questions, perform reasoning over facts and generate answer sequences.
\paragraph{Word Embeddings}
The development of techniques to embed words in a dense lower-dimensional vector space is crucial to almost all types of NLP tasks. Prior to the adoption of these techniques, most NLP processes use one-hot word vectors to represent individual words in a document. This approach has the obvious drawback of being extremely sparse and unable to capture relationships between words. Word embedding generation algorithms such as word2vec \cite{mikolov2013efficient} and GloVe \cite{pennington2014glove} provide us with generic means to create semantically meaningful embeddings for various types of NLP tasks. These embedding techniques exploit the context similarities of words and generate embeddings that usually place semantically or functionally related words close together in the embedded space.
In addition to these general embedding algorithms, it is also possible to train a task-specific embedding by having a neural network find an optimal embedding for the training objective of the network. This is sometimes preferred when the vocabulary size is relatively small and the training examples are abundant. It is also possible to initialise word embeddings in a neural network with a pre-trained generic embedding such as GloVe and fine-tune the embedding with gradients from task objectives.
In QA tasks, words are normally considered as the most basic unit of the documents (character-level models are rare as far as we know), therefore a word embedding layer is usually the first layer of a neural network model for QA. In tasks where the vocabulary size is relatively small (such as bAbI), the usefulness of pre-trained word embeddings are limited, as it is easy for the network to learn the function and relationship of the vocabulary in its own embedding layer(s). However, for tasks with a larger vocabulary, especially when certain words may not appear or only appear a handful of times in the training data, and when the training data size is limited, an expressive word embedding layer might be crucial for the generalisation power of the network.
Another more recent word embedding algorithm is FastText \cite{bojanowski2016enriching}, which utilises sub-word structures such as word roots and suffixes to share representation between similar / related words and "interpolate" word vectors for out-of-vocabulary words. This word embedding algorithm has seen increased use recently and is especially convenient for multilingual tasks, as pre-trained word embeddings already exist for 294 languages.
\paragraph{Phrase and Sentence Representations}
It is often not sufficient to obtain vector representations of natural languages at word level. In almost all NL QA task settings, facts are presented in sentences and paragraphs. There are usually two strategies to convert sentences into vector inputs that can be accepted by a neural network: treating the whole document as a word sequence with separators (including both natural punctuations and artificially inserted dividers), or representing each sentence as a single vector. Both strategies have been used in notable works on text comprehension. For the second strategy, there are more options in how to encode a sentence as a single vector. Weston, et al. \cite{weston2014memory} explored two of the most common techniques for combining words into a single sentence, namely weighted average of word vectors and RNN output over a word vector sequence (more details in the next section).
Another technique of interest is to exploit the recursive structure of a sentence and apply a tree-CNN on a sentence to recursively encode words into phrases then into sentences, such as used in Kalchbrenner, et al. \cite{kalchbrenner2014convolutional}. There lacks a systematic analysis of whether this type of sentence embedding is capable of improving the performance of text comprehension algorithms, but we suspect that the additional incorporated syntactical information could potentially be useful for tasks with long complex sentences where learning about the syntactical structure would have taken up a significant portion of the network’s capacity.
\subsection{Question Answering with Neural Networks}
\paragraph{Comparison of Traditional Approaches with Neural Network Methods}
Collobert, et al. \cite{collobert2011natural} proposed a multi-purpose neural network model for four different NLP tasks. Although not engineered carefully to utilise elaborate linguistic features, the model compares competitively with the state-of-the-art non-neural network NLP systems at the time, with some networks reaching within 1\% of the best model. Even though not a question answering model, this demonstrates that neural network models have great potential in natural language modelling and understanding.
In Hermann, et al. \cite{hermann2015teaching} a comparison is made between the performance of traditional symbolic matching models and neural network models in CNN / Daily Mail reading comprehension tasks. The benchmarking shows an average 10\%+ improvement in accuracy of the best neural network models over the best symbolic matching models.
With more sophisticated model design (such as the architectures mentioned below), deep learning models have managed to exceed the performance of traditional NLP approaches in multiple domains. Apart from the improvement in methodology, the availability of large training data and the increase in computation power has contributed greatly to the rise of neural NLP models, as with other sub-fields of deep learning.
\paragraph{Memory Networks}
One of the most notable works in question answering with reasoning and inference is the Memory Network (MemNN) \cite{weston2014memory}. The main contribution of this research over earlier deep models such as deep LSTM is the introduction of an explicit long-term memory module, allowing information in the network to flow not only from the start of one layer to the end (as enabled by RNN layers), but also from one “scan” of the document to the next, allowing previous states (representing the intermediate result of reasoning) to direct and affect the re-interpretation of the facts at the next time step (via an attention mechanism), thus making multi-step reasoning and fact extraction more viable.
The memory network model is further improved in Sukhbaatar, et al. \cite{sukhbaatar2015end}, allowing it to be trained end-to-end and to be easily applicable to different tasks. In this version of the network, it is able to be trained to map a couple (facts, question) to an answer, which is usually represented as a probability vector over a limited vocabulary. The results on the bAbI dataset from the above two research demonstrate that the Memory Network is capable of performing exceptionally well on multiple types of reasoning tasks, approaching or even reaching zero error rate in some cases, but are still struggling with certain types of tasks such as “positional reasoning” and “path-finding” tasks \cite{weston2014memory, sukhbaatar2015end}.
There are still many potential areas of improvement for the Memory Network model, some of which are addressed in later research works (such as the DMN mentioned below). The representation of input sentences and the interaction of questions and facts are achieved with weighed averaging and inner product respectively, which are relatively simple approaches. The memory size of the network determines the maximum number of facts the model is able to process at the same time, which is not efficient when the fact input is long and irrelevant facts have to stay in memory for the entire duration. Despite a lack of further investigation, we suspect that the number of “rescans” (or reasoning layers) in the network is related to the network’s ability to perform multi-step reasoning (it is observed in the results of Sukhbaatar et al. that more “hops” or reasoning layers generally increase the performance on multi-step reasoning tasks). It is difficult to estimate for a given task, how many reasoning layers is optimal, and it is unknown whether the model can be trained with a variable number of layers.
Other extensions to the memory network include the dynamic memory network (DMN) \cite{kumar2016ask} and key-value memory network \cite{miller2016key}.
\paragraph{Differentiable Neural Computer}
The differentiable neural computer (DNC) \cite{graves2016hybrid} is an external memory-augmented neural network model built to deal with multiple types of tasks in a way more akin to conventional computers. Instead of having the network learn a mapping from inputs to outputs directly, it trains the network to learn a set of operations to manipulate the memory and ultimately to generate the desired output. The network acts as a controller that not only handles input and output, but also issues and receives operations to read and write the memory. The main novelty of this research is the use of differentiable functions in all I/O, operation generation and operation execution steps which enables the training of the network as a whole.
Graves et al. \cite{graves2016hybrid} have shown in their work that the DNC is capable of learning text comprehension tasks, in this case using the bAbI dataset. The network takes individual word tokens as input (rather than whole sentence representations as in MemNN or DMN) and outputs an answer when a “question end” token is encountered. An interesting difference between the DNC and previously mentioned models is that there are no explicit reasoning steps or revisiting of facts in the DNC model. The network learns the operations to store memory about the facts in the memory and to construct answers from the memories directly through supervised learning. Intuitively, it works like an active note-taker, who reads the facts, take “notes” about important information in the facts, reads the question, then piece together the answer from previous notes. The DNC is able to achieve lower error rates and task failure rates on bAbI tasks than previous models with only relatively weak performance in two of the tasks.
The DNC is an interesting idea to combine the advantages of conventional and neural computing, and has shown great promise in its ability to solve a diverse set of tasks. It is worth further investigation to explore its application in question answering.
\subsection{Multilingual NLP and Shared Embeddings}
Neural NLP has also been applied to multilingual scenarios. The most obvious application of deep learning in multilingual NLP is machine translation. Most of the current neural machine translation models are based on the sequence-to-sequence encoder-decoder paradigm \cite{kalchbrenner2013recurrent, cho2014properties}, which uses an autoencoder-like architecture to transform input language representations into an intermediate representation ("encoding"), then decode back to natural language representations through a decoder network. Stacked RNNs are typically used for encoding and decoding of word sequences. The common theme of these methods is finding a shared representation of the source and target language as the "interlingua" for the translation task.
Machine translation is regarded as one of the toughest problems in NLP, and thus it is undesirable to rely on machine translation as a preprocessing step for multilingual NLP. There have been several studies on enabling multilingual representation sharing and knowledge transfer without the need to perform full machine translation. Since word embedding vectors are typically the entry point of a neural NLP process, finding shared embedded representations for two or more languages has been an active area of research. Bilingual (or multilingual) word embeddings can be obtained by either aligning the embeddings in the training process, or aligning existing, pre-trained embeddings through projection or fine-tuning. Usually alignment during training is able to produce higher quality embeddings, but such embeddings might be prone to monolingual performance degradation and are more time- and resource-consuming to train. On the other hand, alignment after training might not produce results that perform as well in cross-lingual tasks, but it is possible to avoid monolingual performance degradation, and such methods are relatively cheap to compute.
In Gouws and S{\o}gaard, an algorithm to train a bilingual word embedding using an arbitrary base embedding algorithm and a set of equivalence relationships is proposed \cite{gouws2015simple}. During the training of word embeddings, words are randomly replaced with their equivalences with a set probability so that in a sense, equivalences share their contexts. One major benefit of this approach is that it is not dependent on parallel corpora, and the specific equivalence classes used can be chosen to benefit the task (such as using part-of-speech equivalence instead of meaning equivalence to benefit POS tagging)\cite{gouws2015simple}. However, this alignment approach requires that the embeddings be trained for each bilingual pair and each equivalence relation individually.
In Gouws et al. \cite{gouws2015bilbowa}, a different alignment approach is proposed, in which word alignment is not needed. Instead, word embeddings are trained on monolingual data individually and then the dissimilarity data on a smaller, sentence-aligned parallel corpus is minimised. This approach has the benefit of requiring less granularity in its aligned data input as well as providing moderate speedup compared to previous algorithms, however it is still not as fast as fully-offline methods \cite{gouws2015bilbowa}.
A typical offline alignment algorithm ("align after training") is Mikolov et al.'s translation matrix method \cite{mikolov2013exploiting}, in which a transformation matrix is used to project the vector representations of one language so that the distance between the source and target language vectors are minimised on a set of translation pairs. i.e.
\[ \min_W \sum_i \| W x_i - z_i \|^2 \] where $i$ is the index of translation pairs, and $x_i$, $z_i$ are embedding vectors of the $i$-th word in language 1 and 2 respectively.
Despite only using a simple linear transformation, this approach is surprisingly effective on top of being cheap to calculate, likely due to the fact that it does not significantly disrupt the linear relationships between monolingual words. A later research by Artetxe et al. found that the distance minimisation objective works best when all word vectors are normalised, and the transformation matrix $W$ is constrained to be orthogonal, as such a treatment preserves the relative distance and position of monolingual word vectors and minimises monolingual performance loss while still achieving good cross-lingual performance boost \cite{artetxe2016learning}. In the latest research on this topic by Artetxe et al., it is found that this offline alignment procedure does not necessarily have to rely on a large set of carefully-selected aligned words, and in the most extreme case, simply aligning the numerals between two languages and gradually expanding the alignment can already achieve reasonable performance in tasks such as word analogy \cite{artetxe2017learning}.
\section{Current Challenges and Research Question}
The recent development in neural question answering systems has paved the way to a future of high-performance question answering systems. However, like many other deep learning-based solutions, these systems have one bottleneck in common, namely the availability of high-quality, high-volume training data. Unlike traditional NLP systems whose rules are manually designed by experts, neural NLP systems have to learn its linguistics knowledge and world knowledge from a large amount of input data. It is suggested that in certain NLP tasks, more than a million training examples are needed for the network to reach optimal performance \cite{banko2001mitigating}.
For question answering tasks, the datasets usually require human annotation, which increases the difficulty of compiling them significantly. In fact, one of the first large text comprehension datasets for neural QA benchmarking, the CNN/Daily Mail dataset, circumvents the issue of human annotation by constructing questions from existing news headlines \cite{hermann2015teaching}, which significantly increases the quantity of the data that can be utilised, but also limits the quality of the final dataset. The CNN/Daily Mail dataset contains 380K QA pairs in the CNN section and 879K QA pairs in the Daily Mail section, making it one of the largest public datasets in this category.
The Maluuba NewsQA dataset \cite{trischler2016newsqa}, which is derived from the CNN / Daily Mail dataset, improves the question and answer quality of the original dataset by human annotation. It has a total size of 120K QA pairs.
Another frequently used dataset in recent research, the SQUAD dataset \cite{rajpurkar2016squad}, contains over 100K QA pairs.
Almost all of the current QA datasets for machine learning research are in English with a few exceptions, but even these few datasets in other languages are usually
limited by data quantity or quality. The sheer size of the dataset required to train a performant QA system (or NLP systems in general) makes it especially challenging to develop a model for a low-resource language. The lack of large training datasets holds even for otherwise widely-spoken languages such as Spanish and Chinese. However, as we can imagine, the demand for question answering systems in these languages is not necessarily lower than in English. This inevitably puts the onus of collecting a sizeable dataset in the target language on the developer of the system, which may be beyond their capabilities.
Since building large datasets for different languages is difficult, it would be immensely helpful if instead of having to individually collect a huge amount of data for each language, we could utilise the relative abundance of training data in resource-rich languages to boost the performance of QA systems trained with limited data in a resource-limited language. Therefore we raise the following research question:
\begin{center}
\fbox{\begin{minipage}{30em}
Is it possible to apply transfer learning to a question answering task so that models developed in one language can be used to improve the performance of models developed for another language?
\end{minipage}}
\end{center}
In particular, we have the following sub-questions:
\begin{itemize}
\item[1] Is there anything in common between the same task in different languages that can be learned once and applied to a new language?
\item[2] Is there any common features among different languages that can be exploited to transfer linguistic knowledge between tasks?
\item[3] What kind of neural network architecture can be designed to benefit the most from transfer learning?
\end{itemize}
\section{Transfer Learning}
If we consider two text comprehension tasks in two different languages, they are not entirely independent. There exist two main sources of information that can be shared between the tasks:
\begin{itemize}
\item[1] The languages themselves have some common linguistic features that can be exploited, such as words with equivalent meanings, similar syntactical structures in two languages, etc.
\item[2] The task itself may require the same logical steps to complete, regardless of the language of its input and output.
\end{itemize}
Therefore, in order to reuse the knowledge learned in a resource-rich language, we consider possible ways to exploit task similarities in these two aspects.
For exploiting similarities between two languages, the most straightforward approach is to increase the similarity of the representations of words from the two languages. We therefore explore the possibility of \textbf{aligning the word embeddings} of the two languages so that similar-meaning words in two languages may have similar vector representations.
For sharing common knowledge for the same task, we consider \textbf{reusing (or sharing) the same network / network layers} between two tasks, so that the network may learn to perform a certain function (such as context matching or reference resolution) on the resource-rich language and generalise it to the resource-limited language.
Generally, for transfer learning in neural networks, there are two basic approaches, namely fine-tuning (sequential training) and joint training (simultaneous training). The effectiveness of each strategy varies from task to task. In this research we consider both approaches and compare their performance in cross-lingual transfer learning.
\section{Data Collection}
For the purpose of transfer learning and performance comparison, we require two comparable corpora in two different languages with their associated QA pairs. However, as mentioned earlier in chapter 4, such datasets are not readily available. Therefore, it is necessary to compile a new bilingual dataset for our experiments. Construction of a human-annotated dataset is outside the scope of this research, but the same methodology used in the compilation of the CNN / Daily Mail dataset \cite{hermann2015teaching} is capable of constructing a reasonably accurate QA dataset from raw news articles without the need for human annotation. For our experiments, we use the CNN section of the CNN / Daily Mail dataset as the English corpus, and compile a new dataset in Spanish following the same methodology.
\subsection{Data Collection Strategy}
In Herrmann et al. \cite{hermann2015teaching}, the QA pairs are generated from news articles with several bullet points. Usually the bullet points on news websites are a summary of one of the main topics of the article. By removing one of the key words in a bullet point and requiring the QA system to find the best matching word from the article to fill in the blank, we are essentially creating a reading comprehension question for the given article. However, usually only questions generated by replacing unique entities result in meaningful reading comprehension questions, whereas questions generated from replacing common words could often be answered by applying grammatical rules or learning common collocations, therefore the question generation is based on the removal of named entities only. To generate a question from a news article, the bullet points and the news story (content body) is separated and paired up, then one of the named entities in the bullet point is replaced with a \textbf{@placeholder} marker to generate a question about the news story.
To further ensure that the system actually learns to perform reading comprehension rather than simple blank word deduction based on collocation (Such as automatically deduce "Olympics" from having "Rio" as the previous word), in Hermann et al. \cite{hermann2015teaching}, all named entities are replaced with an entity marker \textbf{@entityX} where X is a random or ordinal integer. This procedure can be performed via named entity recognition.
\subsection{Source Corpus Compilation}
\begin{center}
\begin{figure}
\includegraphics[width=\linewidth]{media/article.png}
\caption{Example of Scraped Web Page from El Mundo (from \cite{elmundosample})}
\label{fig:article}
\end{figure}
\end{center}
\begin{center}
\begin{figure}
\fbox{\begin{minipage}{30em}
\paragraph{bullet points}
\begin{itemize}
\item Mónica Spear y su marido fueron asesinados en presencia de la hija de ambos de 5 años
\item Han detenido a 7 personas intregrantes de la banda "Los Sanguinarios de El Cambur"
\item Venezuela tiene una gran tasa de criminalidad: 39 asesinatos por cada 100.000 habitantes
\end{itemize}
\paragraph{story}
Esta semana Venezuela ha recibido una noticia que ha causado gran consternación en todo el país: el asesinato de la actriz, modelo y ex Miss Venezuela Mónica Spear y de su esposo, el irlandés Thomas Henry Berry .
El fatídico hecho se produjo en una autopista Venezolana cuando el matrimonio se disponía a regresar a su casa. En el interior del vehículo, se hallaron los cuerpos sin vida de la joven pareja y a la hija de ambos, de 5 años que también resultó herida.
Miles de personas acudieron al velatorio de la pareja. Tanto artistas como familiares y allegados de la actriz y el empresario europeo acudieron desde el mediodía al Cementerio del Este de Caracas para dar el último adiós a la pareja.
...
\end{minipage}}
\caption{Collected Spanish Corpus Sample}
\label{fig:escorp}
\end{figure}
\end{center}
To prepare a dataset in Spanish following the same format, we scrape a total of 37.7K news articles from El Mundo (www.elmundo.es) and CNN Spanish (cnnespanol.cnn.com). The links for news articles from 2014 to 2017 are obtained from historical versions of the front page and categorical portal pages of the websites archived at Wayback Machine (archive.org/web). The articles are filtered to remove video- / picture-only pages and pages only containing extremely brief text (less than 50 words) as well as articles longer than 2000 words for content validity and consistency. The articles are stored as (url, story, bullet point) tuples and duplicated articles are removed. An example of an article before being processed can be seen in figure \ref{fig:article}. Another example from the collected news article corpus can be seen in Figure \ref{fig:escorp}.
\subsection{Data Preprocessing for QA Task}
\begin{center}
\begin{figure}
\fbox{\begin{minipage}{30em}
\paragraph{questions}
\begin{itemize}
\item @placeholder y su marido fueron asesinados en presencia de la hija de ambos de 5 años
\item Han detenido a 7 personas intregrantes de la banda "@placeholder"
\item @placeholder tiene una gran tasa de criminalidad: 39 asesinatos por cada 100.000 habitantes
\end{itemize}
\paragraph{answers}
@entity10, @entity1, @entity12
\paragraph{story}
Esta semana @entity12 ha recibido una noticia que ha causado gran consternación en todo el país: el asesinato de la actriz, modelo y ex @entity8 @entity10 y de su esposo, el @entity13 @entity6 . El fatídico hecho se produjo en una autopista Venezolana cuando el matrimonio se disponía a regresar a su casa. En el interior del vehículo, se hallaron los cuerpos sin vida de la joven pareja y a la hija de ambos, de 5 años que también resultó herida.Miles de personas acudieron al velatorio de la pareja.
...
\end{minipage}}
\caption{Generated Spanish QA pairs example}
\label{fig:esqa}
\end{figure}
\end{center}
To match the entity anonymisation used in the CNN / Daily Mail dataset, we also perform entity recognition and replacement in the Spanish corpus. Named entity recognition is performed via the Google Cloud Natural Language API (https://cloud.google.com/natural-language/) and a list of named entities (including unique names, locations and organisations) are generated for each (story, bullet point) pair. For each (story, bullet point) pair, matched entities (such as the full name and abbreviations of the same organisation, or full name and last name of a person) are replaced with the same entity marker. Finally, one of the replaced entities in the bullet point is replaced with the \textbf{@placeholder} marker to generate the question. One bullet point may be used to generate multiple questions if it contains more than one entity from the story. We generate a total of 76K QA pairs via this process. An example of generated QA pairs can be seen in figure \ref{fig:esqa}.
\subsection{Word Embeddings}
In order to compare the transfer learning performance of the networks using unaligned word embeddings and aligned word embeddings, we need individual embedding vectors for both English and Spanish, as well as the aligned versions of these embedding vectors. In our experiments, we use the pre-trained 300-dimensional FastText embeddings \cite{bojanowski2016enriching} due to its large training corpus (Wikipedia) and availability for both languages. For embedding alignment, we use the algorithm outlined in Artetxe et al. (2016) \cite{artetxe2016learning} to calculate aligned embeddings via orthogonal transformation (details in the next section). For the aligned vocabulary dictionary needed for embedding mapping, we use the alignment dictionary of OpenSubtitles 2012 parallel text dataset \cite{TIEDEMANN12.463}, available on the Open Parallel Corpus website (http://opus.lingfil.uu.se/index.php). The original FastText embedding vectors for English and Spanish exceed 6.6G and 2.6G respectively, and are thus required to be trimmed to the subset of words that are present in our news article QA datasets. The trimmed version of the embedding vectors are 210MB and 260MB respectively and can be easily handled with our existing experiment setup.
\section{Proposed Solutions and Model Description}
We propose two network architectures for implementation of cross-lingual transfer learning. The first approach relies on sequential transfer learning and uses fine-tuning of models trained on a resource-rich language as the main technique of model adaptation. We also apply word embedding alignment as an additional technique of knowledge sharing. The second approach relies on the joint training of models on two languages simultaneously to directly share intermediate representations of the network. In order to encourage the network to learn a truly shared representation for both languages rather than learning two representations in the same network, we also experiment with adding a penalty term optimised via adversarial training.
\subsection{Base QA Model}
\begin{center}
\begin{figure}
\includegraphics[width=\linewidth]{media/attentive.png}
\caption{Modified Attentive Reader (based on Hermann et al. \cite{hermann2015teaching} and Chen et al. \cite{chen2016thorough})}
\label{fig:attentive}
\end{figure}
\end{center}
For the base monolingual model for question answering, we adopt the improved attentive reader model described in Chen et al. \cite{chen2016thorough}. A basic outline of the network can be seen in figure \ref{fig:attentive}. The network applies two bidirectional RNNs on the embedding vectors of words in the story and question respectively, obtaining individual context-dependent word representations of each word in the story and a summary representation for the question. A similarity score is calculated between the representations of story words and the question to calculate the attention score of each word. A weighted sum of the story word representations (weighed by their attention scores) is then calculated to obtain the representation of the attended word(s). Finally, one or more dense layers act as the classifier to map the attended word(s) representation to the one-hot representation of the answer. The model can be formulated as follows:
\textit{Let $C_i$ be the i-th word in the story, and let Q be the question. Let $R_i$ be the context-dependent representation of $C_i$ and $r$ be the summary representation of the question. We use $a_i$ to denote the attention score, $u$ to denote the attended word representation and $o$ the answer's class. $M$ is a matrix parameter in the bilinear term $R_i^T M r$ to add more expressiveness of the similarity function (as compared to simpler similarity measures like dot product), as suggested by Chen et al. \cite{chen2016thorough}.}
\[ R_i = \phi_{RNN}^C(emb(C_i))[:] \]
\[ r = \phi_{RNN}^Q(emb(Q))[-1] \]
\[ a_i = Softmax(R^T M r) \]
\[ u = \sum_i a_i R_i \]
\[ o = Softmax(\phi_{Linear}(u)) \]
In practice, this model performs by matching the context in each story position with the question representation, finding the positions that best match the \textbf{@placeholder} token's context and focusing its attention on these positions. The weighted sum step evaluates the attention received by each position and finds the position with the maximum attention, which is assumed to be the position of the answer token. The final dense layer(s) then map the representation of the answer token (if the attention mechanism finds it correctly) back to its class (i.e. ID of the answer keyword).
\subsection{Sequential Transfer Learning}
In sequential transfer learning, we first learn a model on $L_1$:
\[ M_1(C_1, Q_1) \to A_1\]
Then we fine-tune the trained model $M_1$ on the $L_2$ data to adapt the model to $L_2$:
\[ M_1'(C_2, Q_2) \to A_2\]
where $M_1'$ is initialised with the parameters of $M_1$ and trained with lowered learning rate.
In the most basic form of sequential transfer learning, the model is identical to the base monolingual QA model, except it is trained twice in two different languages. The intuition is that the weights of the network encode information about how to find a proper context-aware representation for words and phrases (mainly via the biRNN layer), as well as how to perform context matching and how to map attended entities back to answer word classes. Only the first step (calculating context-aware representations) is highly language-dependent, whereas the following steps can be thought of as language-independent in the abstract sense. Therefore by fine-tuning the network, it should ideally "forget" about the language-dependent functions and retain the ability to perform the language-independent steps.
We may also align the word embeddings so that similar words in the two different languages are represented by word vectors that are close to each other in the shared embedding space. In this way, we may reduce the distance between the representation of similar expressions in $L_1$ and $L_2$, and in turn hopefully reduce the difference between optimal weights of the network for $L_1$ and $L_2$. Perfect word embedding alignment of two languages is not possible, as it is equivalent to solving machine translation, which is an immensely difficult task. By applying the vector alignment technique introduced by Artetxe et al. \cite{artetxe2016learning}, it is possible to minimise the difference between two sets of word embeddings for a selected list of words. It is suggested in \cite{artetxe2016learning} as well as \cite{duong2017multilingual} that such an alignment procedure is also capable of aligning words outside of the alignment dictionary via the preservation of relative position of an arbitrary word relative to these "anchor" words, as discussed in chapter 3. Therefore, we may use the following objective to find the best projection for aligning two word embedding matrices:
\[ \arg\min_P \| P X_1 - X_2 \| \]
where P is an orthogonal matrix.
We may then formulate the shared embedding model as follows:
Define:
\[ \phi(\cdot)=\phi^{RNN}(\phi^{word\_emb_{L_1, L_2}}(\cdot) + \phi^{entity\_emb}(\cdot)) \]
\[ \sigma(R, r)=softmax(R^T M r) \]
Then:
\[ R_i^{(j)}[:]=\phi^{(C)}(C_i^{(j)})[:] \]
\[ r_i^{(j)}=\phi^{(Q)}(Q_i^{(j)})[-1] \]
\[ \gamma(R, r) = Softmax[Linear(\sigma(R, r)^T R)] \]
And the training objective can be defined as:
\[ argmin_{\Theta_\phi, \Theta_\gamma, \Theta_\sigma} -logP(A|Q, C, \Theta_\phi, \Theta_\gamma, \Theta_\sigma) \]
We use $\Theta$ to denote the parameters for a certain network.
\subsection{Joint Learning with Adversarial Training}
In the joint learning approach, we train the same model to perform the same QA task in both languages. We have only one model:
\[ M(C_i, Q_i) \to A_i \]
where $i$ can either be 1 or 2.
In its most basic form, the network architecture is also identical to that of the base monolingual model, and the only difference is in the composition of the training examples. The network now takes a random mixture of training examples from two languages and has to predict the answer regardless of input language. For the simple joint training case, we use aligned word vectors as in the last section to increase task similarity between two languages.
Two of the main challenges of joint training a network on two tasks are:
\begin{itemize}
\item[1] If the network is too expressive (having more parameters than optimal), the network might overfit and learn two separate modes for two different tasks, undermining the whole point of joint training.
\item[2] If the network is not expressive enough, the network might fail to learn a useful shared intermediate representation for the two tasks, learning a set of "average weights" that perform well on neither tasks.
\end{itemize}
To avoid these issues, we have to find a way to encourage the network to share intermediate representations between tasks, or equivalently, penalise the network for using significantly different representations for separate tasks. Suppose $R^*$ is an ideal shared representation between tasks in $L_1$ and $L_2$, we wish to find a penalty term similar to the following form:
\[ -\alpha (||R^* - R_1|| \cdot I_{L_1} + ||R^* - R_2|| \cdot I_{L_2}) \]
(Here the $R$'s do not strictly correspond to the $R_i$'s in the base model. They may correspond to intermediate representations from $R_i$ and $r$ to $u$ in the original model, depending on implementation details, but the idea is the same: share representation between two tasks at some level of the network.)
In other words, a penalty term that penalises the network for adopting an intermediate representation that is too different from the ideal, shared representation. However, such a penalty function does not readily exist because we cannot know the ideal shared representation $R^*$ beforehand. Nevertheless, we do know that if $R_1$ and $R_2$ deviate from $R^*$ too much, they will be different from each other, and if the average of $||R_1 - R_2||$ is significant, a discriminator network can be trained to distinguish them from each other and determine their source language just by looking at these intermediate representations. Therefore it is possible to train a discriminator network whose performance score can serve as the penalty term for not sharing representations.
We may formulate the network with the discriminator included as follows (choosing the attended word representation layer $u$ for applying the discriminator):
(Same definition for $\phi(\cdot)$ and $\sigma(\cdot, \cdot)$ as above)
\[ R_i^{(j)}[:]=\phi_i^{(C)}(C_i^{(j)})[:] \]
\[ r_i^{(j)}=\phi_i^{(Q)}(Q_i^{(j)})[-1] \]
\[ u = \sigma(R, r)^T R \]
\[ \gamma(u) = Softmax[Linear(u)] \]
\[ \delta(u) = \delta^{MLP}(u) \]
One challenge of training this network is that it has multiple objectives. The primary objective is to minimise the loss of the answerer network $\gamma$, but the answerer network also has to take into account the penalty term and lower the accuracy (i.e.\ increase the loss) of the discriminator network. However, in the meantime, the discriminator network also has to be trained, and its weights should be optimised to lower the discriminator loss. These objectives are at odds with each other. Fortunately, it is possible to combine the training objectives of the answerer network and discriminator network together and simultaneously update their weights following opposite gradient directions, using an adversarial training technique introduced by Ganin et al. \cite{ganin2016domain}.
\begin{center}
\begin{figure}
\includegraphics[width=\linewidth]{media/adversa.png}
\caption{Modified Attentive Reader with Adversarial Training}
\label{fig:adversa}
\end{figure}
\end{center}
We may write the joint training objective as follows:
\[ argmin_{\Theta_\phi, \Theta_\gamma, -\Theta_\delta} [-logP(A|Q, C, \Theta_\phi, \Theta_\gamma) + \alpha \cdot log P(\lnot L|Q, C,\Theta_\phi, \Theta_\delta)] \]
(we include $\Theta_\sigma$ in $\Theta_\phi$ for clarity)
Notice that $\Theta_\phi$ and $\Theta_\gamma$ are parameters of the answerer network (part of the network in the base model), whereas $\Theta_\delta$ is the parameters of the discriminator network. This training objective follows the gradient of the answerer network weights to minimise answerer loss and maximise discriminator loss, and also simultaneously follows the negative gradient of the discriminator network weights to minimise the discriminator network loss. Intuitively there exists a competition between the answerer network and the discriminator network, where the discriminator network strives to best classify intermediate representations by their language, and the answerer network tries to confuse the discriminator network by outputting similar-looking intermediate representations. In this way, we are able to encourage the answerer network to produce an intermediate representation that shares as much information as possible between tasks from the two languages. The combined architecture of the answerer and discriminator network is shown in figure \ref{fig:adversa}.
\section{Experiments}
\subsection{Experiment Setup}
The network architectures from the previous chapter are implemented in PyTorch 0.2. For word embeddings, we use the trimmed version of the pre-trained 300-dimensional FastText vectors provided in \cite{bojanowski2016enriching} and their aligned versions, as described in the "Data collection" section. For words in the news articles that do not have an existing embedding vector, we map them to one of the 100 randomly generated embedding vectors (all instances of the same word are mapped to the same vector). For entity tokens and placeholder tokens, we also map them to random embedding vectors sampled from a normal distribution. All embedding vectors in our experiments are normalised to length 1. We share the embeddings of the entity tokens between two languages in all tasks. Throughout our experiments, we do not perform fine-tuning on embedding vectors and use them as is. (Main reason for this is that fine-tuning on the embedding vectors would expose the answerer network to the language label of the input indirectly in the joint + adversarial training case, which is not desirable.)
For our model parameters, we mostly use the same values recommended by Chen et al. \cite{chen2016thorough}. We use a batch size of 32. For the biRNNs, we use a hidden layer size of 128. We apply a dropout rate of 0.2 on the embedding layer output. For the initial training of models as well as joint training, we use the Adam optimiser \cite{kingma2014adam} with a learning rate of 0.001. For fine-tuning, we use Adam with learning rate 0.0001.
Similar to \cite{chen2016thorough}, we adopt a few additional techniques to improve the model performance:
\begin{itemize}
\item Using entity relabeling to label the entities based on their order of appearance. We believe this implicitly assigns different prior probabilities to entities appearing in different positions in an article. For instance, over time, the network may learn to assign higher answer probability to entities from the beginning of the story, as it is common for news articles to contain main points in opening paragraphs. This entity labeling strategy allows the network to exploit such patterns.
\item Predicting an answer only in the subset of entities that actually appear in a document.
\item Using a bilinear term instead of a dot product for similarity measure between the question and contextualised story word representations.
\end{itemize}
For adversarial training, a weight must be given to the discriminator loss to balance the importance of answerer objective and discriminator objective. The weight of the discriminator loss is set to 0.1 after parameter tuning.
The experiments are run on a cluster with nVidia P100 GPUs. The average training time for single language model training takes approximately 2-3 hours before hitting early stopping criteria. Fine-tuning takes 1-2 hours on average. Joint models take approximately 4-5 hours to converge.
\subsection{Results}
\begin{center}
\begin{table}
\begin{tabular}{| p{2.5cm} || p{2cm} | p{2cm} | p{2cm} | p{2cm} |}
\hline
& En. Mono. & Es. Mono. & En.$\to$Es. (no tuning) & En.$\to$Es. (tuned)\\
\hline
Unaligned Embeddings & 0.65666 & 0.42089 & 0.21908 & 0.54538 \\
\hline
Aligned Embeddings & 0.66291 & 0.41596 & 0.26405 & \textbf{0.56622} \\
\hline
Joint Training & 0.63226 & 0.51001 & - & - \\
\hline
Adversarial Training & \textbf{0.66979} & 0.5133 & - & (0.51714) \\
\hline
\end{tabular}
\caption{Experiment Results}\label{table:res}
\end{table}
\end{center}
The experiment results can be seen in table \ref{table:res}:
The rows represent English monolingual performance, Spanish monolingual performance, English to Spanish transfer learning without fine-tuning, and English to Spanish transfer learning with fine-tuning. The columns represent sequential transfer learning using unaligned word embeddings, sequential transfer learning using embedding alignment, joint training of bilingual models and joint training of bilingual models with the adversarial term. Bold numbers are the best performance observed in all experiments on that language.
We have the following observations based on the data:
\begin{itemize}
\item In monolingual experiments without any transfer learning, the model performs much better on English tasks, most likely due to having 5 times as many training examples as Spanish.
\item Using aligned word embeddings does not noticeably degrade the monolingual performance of the QA model and in some cases even improves the performance, although the improvement is likely by chance. This confirms that word embedding alignment through orthogonal transformation preserves monolingual information.
\item Surprisingly, the best monolingual performance in English is achieved when using joint bilingual training with adversarial training. This is unexpected because the monolingual performance on Spanish data is much lower than English, so some moderate negative impact on English performance is expected even with the adversarial penalty term.
\item Sequential transfer learning through fine-tuning has a significant beneficial impact on Spanish task performance. This effect is large even when using unaligned embeddings.
\item Using aligned word embeddings further improves the performance of sequential transfer learning model on Spanish tasks, but not by as much as using fine-tuning itself.
\item Using aligned word embeddings without any fine-tuning (directly applying English model on Spanish test data) actually improves the performance on Spanish tasks, but it is much lower than the model trained on Spanish data alone.
\item Joint learning with adversarial learning does not seem to perform better for Spanish data than fine-tuning + aligned embeddings.
\end{itemize}
\section{Discussions}
\begin{center}
\begin{figure}
\includegraphics[width=\linewidth]{media/curve.png}
\caption{Training Curve for Sequential Transfer Learning}
\label{fig:curve}
\end{figure}
\end{center}
\paragraph{Sequential Transfer Learning}
There are several interesting observations from the experiment results. The first major observation is the effectiveness of sequential transfer learning through fine-tuning. It is remarkable to see a simple and relatively naive approach achieving over 20\% accuracy gain. We further investigate the performance gain through fine-tuning by comparing the training curve with and without fine-tuning (figure \ref{fig:curve}).
As we see in figure \ref{fig:curve}, without any form of transfer learning, learning on Spanish tasks is slower and peaks at below 45\% accuracy. Through fine-tuning of models initialised with English tasks, the model not only converges faster, but also achieves a higher performance in the end. When aligned word embeddings are used along with fine-tuning, the model converges at a similar rate to the fine-tuned model with original embeddings, but achieves slightly lower loss and higher accuracy.
We have initially suspected that embedding alignment would be the more crucial step in sequential transfer learning, because it affects the input layer of the network and all downstream network parameters depend on it, also because shared word embeddings was shown to be helpful in other tasks like word analogy and machine translation \cite{mikolov2013exploiting,artetxe2017learning}. However, this is not the case according to our observations. The majority of prediction accuracy gain comes not from word embedding alignment but fine-tuning of the network parameters. Although this appears to suggest the notion that fine-tuning plays a bigger role in cross-lingual transfer learning than aligned word embeddings, we have good reasons to believe it is highly task-dependent. The task we evaluate our models on favours the use of fine-tuning while does not rely on alignment of language representations as much. In particular:
\begin{itemize}
\item Questions generated from news article bullet points are relatively simple, and typically do not require a deep understanding of the text to answer. The questions can often be answered by context matching alone, without much need for further logical reasoning, as observed in \cite{chen2016thorough} for the original CNN / Daily Mail dataset. Our expansion of the dataset into the Spanish language is based on the same data collection and processing procedure, and therefore our dataset also inherits the same issue. The implication of this is that answering these questions does not require much language-dependent knowledge or even meaning-related knowledge that should be transferred from the source language to the target language. Often, simply finding the same or similar words in the story as in the question is sufficient to locate the answer. This diminishes the need and usefulness of shared word embeddings, which primarily transfers knowledge about word meaning and word relationships.
\item We used entity replacement to swap out all named entities in the news articles, and we further restricted all possible question answers to be from the set of entity tokens. This has the unintended effect of simplifying the question answering process to finding the right context around an entity token that best resembles that of the \textbf{@placeholder} token, then mapping the entity token back to its entity ID (class). This task is not strongly dependent on the particular form of language representation, and it is reasonable to believe that a network trained to perform this context matching and entity mapping task in one language can be expected to perform well in a second language with relatively little adjustments.
\item The reason that the model trained on Spanish data alone does not perform as well as fine-tuned model is likely that it does not have enough training examples to match similar context between the story and question (in the attention layer's $\phi_\sigma(\cdot)$) and to map an attended entity back to its ID (in the final dense layer). These are relatively trivial tasks and are essentially the same task regardless of the source language, so we can expect the large improvement by inheriting the model parameters from an English model.
\end{itemize}
Nevertheless, we still see a performance increase with the introduction of aligned word embeddings, indicating that some language-related knowledge transfer is still helpful for this particular task. It is reasonable to believe that in a QA task where language-related knowledge is utilised more frequently (such as when synonyms are often used interchangeably or when some deduction based on topics is involved), word embedding alignment might be able to provide a more substantial boost to performance.
\paragraph{Joint Training and Adversarial Training}
Another interesting observation from the results is that although the use of adversarial training increases the performance of both languages compared to training them individually, the improvement of joint learning with adversarial training is not as large as using aligned embeddings with fine-tuning on the Spanish tasks. There are several likely reasons why fine-tuning appears to achieve higher performance:
\begin{itemize}
\item The model is “overwhelmed” by English data. The training dataset of English tasks is 5 times as large as the Spanish dataset. This has the unfortunate effect of biasing the network towards performing better on English questions, even with the presence of the adversarial penalty term. We have attempted to remedy this through supersampling the Spanish data 1:5 to balance each minibatch, however that leads to higher overfitting and is overall even more detrimental to performance.
\item Similar to the discussion in the sequential transfer learning, the nature of our evaluation task is already language-independent to some degree. We train the adversarial network to distinguish attended word representations between source languages. However, since all possible answers are entity tokens (\textbf{@entityX}), whose embeddings are already shared between languages, and the network is trained to focus its attention on these tokens, the task itself is already language-independent to some degree, and therefore adversarial training cannot provide much contribution to the language-independence of this intermediate representation.
\end{itemize}
However, it is worth noting that the adversarial training model on English tasks actually manages to outperform the model trained solely in English. Apart from actual sharing of knowledge between English and Spanish tasks, we believe another factor might also play a role in this outcome. It is possible that the discriminator also serves as extra regularisation during training, as overfitted examples are more likely to be distinct from other examples and might end up being easier for the discriminator to classify their language. By penalising high discriminator accuracy it is likely that we are also controlling overfitting of training examples.
\section{Conclusion and Current Limitations}
Although limited in scope, our experiments have shown that cross-lingual transfer learning is indeed possible for our QA problem, and can be highly effective in certain scenarios. We have studied the cross-lingual performance of model fine-tuning, aligned word embeddings and joint training with adversarial training, and they have all shown promise in our experiment results. Fine-tuning is a simple approach with surprisingly high performance gain, which we believe is best achieved when the task does not heavily rely on linguistic knowledge that depend on the language itself. Word embedding alignment through orthogonal transformation is capable of increasing cross-lingual performance in all our experiment settings with minimal to no loss on monolingual performance, providing us with an inexpensive way of boosting transfer learning outcome. Joint training with adversarial learning provides better performance compared to both our monolingual results in the experiments, although for transfer learning from resource-rich language to resource-constrained language, it is still not achieving higher prediction accuracy than fine-tuning-based transfer learning, at least for this specific QA problem.
There are multiple limitations of our current QA problem and research scope, including:
\begin{itemize}
\item The quality of the dataset is not the most ideal. Due to our time and resource limitations, we have to rely on a machine-generated dataset. However, due to the flaws in data collection and preprocessing procedure and errors in entity recognition, the dataset itself is not perfectly accurate. Moreover, the types of questions that exist in the dataset are limited and are typically not challenging from a reading comprehension perspective.
\item We are only able to consider transfer learning between two languages due to scope limitations. However, the same dataset generation procedure allows multiple languages with varying degrees of similarity to be considered at the same time, which may provide better insight into how well each of the transfer learning schemes work.
\item The process of entity anonymisation is necessary for keeping the questions challenging, but as discussed in the last chapter, it removes much of the language dependency of the task and artificially lowers the difficulty of learning. Realistic tasks are not likely to benefit from this artificial language independence of the tasks.
\item For consistency between joint training and sequential training, we do not fine-tune the embedding vectors throughout the experiments. Yet arguably fine-tuning of embedding vectors should be beneficial for both monolingual and cross-lingual performance, as the pre-trained general-purpose word embeddings might not be ideal as task-specific word representations for a given QA task.
\end{itemize}
\section{Future Work}
For future follow-up work of this research, we suggest the following directions:
\begin{itemize}
\item Explore cross-lingual transfer learning in more challenging datasets and with more advanced base QA models. One of the advantages of methods studied in this research is that most of them can be adopted to arbitrary learning algorithms and network architectures, so studying their effect on different tasks and different models is possible and may provide great insight into how well these transfer learning methods work in different scenarios. As a special case, it would be interesting to explore the performance of a similar experiment setup to this research on a problem that does not have entity replacement and is thus more language-dependent inherently. In addition, it might be interesting to see whether some of the state-of-the-art models are compatible with these simple, inexpensive cross-lingual transfer learning techniques.
\item Explore how well the model performs when the embedding vectors themselves are fine-tuned. This is especially interesting when adversarial joint training is involved, as it may lead to the model eventually learning a shared task-specific embedding.
\item Explore the option of using adversarial training to directly perform embedding alignment.
\end{itemize}
\newpage
\bibliographystyle{acm}
\bibliography{thesis}
\end{document}
|
{"hexsha": "ecde47e087ae6a9bc12ea6e158fa68a5fd6eb5d1", "size": 76381, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "ideas/thesis.tex", "max_stars_repo_name": "Mithrillion/BiQA", "max_stars_repo_head_hexsha": "f61bea95521f5b2ffd838aa60aecaad568de6564", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ideas/thesis.tex", "max_issues_repo_name": "Mithrillion/BiQA", "max_issues_repo_head_hexsha": "f61bea95521f5b2ffd838aa60aecaad568de6564", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ideas/thesis.tex", "max_forks_repo_name": "Mithrillion/BiQA", "max_forks_repo_head_hexsha": "f61bea95521f5b2ffd838aa60aecaad568de6564", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 124.1967479675, "max_line_length": 2856, "alphanum_fraction": 0.8024770558, "num_tokens": 16066}
|
import math
from scipy.special import logsumexp
def logsumexp_list(lst):
    """Return log(sum(exp(x) for x in lst)) in natural-log space.

    :param lst: non-empty list of natural-log values.
    :return: the log-sum-exp of the values.
    :raises IndexError: if ``lst`` is empty (same as the original).

    Fixes two defects of the previous version:
    - it combined natural-log inputs with ``math.log10``, mixing log bases
      so the result was neither a base-10 nor a base-e log-sum-exp;
    - it always used the second element as the pivot, so ``exp(a - b)``
      could overflow when ``a >> b``. We now pivot on the larger value,
      which keeps the exponent non-positive.
    The input list is no longer mutated.
    """
    acc = lst[0]
    for x in lst[1:]:
        # pivot on the larger value so exp() sees a non-positive argument
        hi, lo = (acc, x) if acc >= x else (x, acc)
        acc = hi + math.log1p(math.exp(lo - hi))
    return acc
def forward(X):
    """Scaled forward algorithm for a 2-state HMM (fair die vs. loaded die).

    Prints two quantities derived from the observation sequence ``X``
    (a list of 1-based die faces): the product of the per-step scaling
    factors, and the sum of ``exp`` over the final state vector.

    NOTE(review): the function carries two parallel computations, one in
    probability space (F*_1 / next_fi1) and one in log space
    (F*_2 / next_fi2); several steps look inconsistent -- see inline notes.
    """
    K = 2  # number of hidden states: 0 = fair die, 1 = loaded die
    F0_1 = []  # initial forward vector, probability space
    F0_2 = []  # initial forward vector, log space
    # emission probabilities per state for faces 1..6
    E = [[1/6,1/6,1/6,1/6,1/6,1/6], [1/10,1/10,1/10,1/10,1/10,1/2]]
    # transition matrix: a[l][k] reads as a move between states k and l
    a = [[0.95,0.1], [0.05,0.9]]
    for i in range(K):
        if i == 0:
            # start in state 0 with probability 1 (log(1) = 0)
            F0_1.append(1)
            F0_2.append(0)
        else:
            # -1000000 stands in for log(0)
            F0_1.append(0)
            F0_2.append(-1000000)
    # NOTE(review): Fi_1 is never reassigned inside the loop below, so the
    # probability-space recursion always reads the *initial* vector; the
    # updated values are stored into Fi_2 instead (see the two assignments
    # near the bottom of the loop). This looks like a bug -- confirm.
    Fi_1 = F0_1
    Fi_2 = F0_2
    Slst = []  # per-step scaling factors si
    for i,xi in enumerate(X):
        next_fi1 = []  # unscaled forward values for this step
        next_fi2 = []  # log-space forward values for this step
        si = 0  # scaling factor = sum over states of the unscaled values
        for l,el in enumerate(E):
            sumf = 0
            logsumexplst = []
            # transition sum in probability space (uniform 1/2 prior at i == 0)
            for k,fi in enumerate(Fi_1):
                if i == 0:
                    sumf += fi * (1/2)
                else:
                    sumf += fi * a[l][k]
            # the same sum, carried in (natural) log space
            for k,fi in enumerate(Fi_2):
                if i == 0:
                    logsumexplst.append(fi + math.log(0.5))
                else:
                    logsumexplst.append(fi + math.log(a[l][k]))
            sumf1 = logsumexp_list(logsumexplst)
            next_fi1.append(el[xi-1]*sumf)
            si += el[xi-1]*sumf
            # NOTE(review): mixes log10 with the natural-log values above
            next_fi2.append(math.log10(el[xi-1]) + sumf1)
        # NOTE(review): the log-space results in next_fi2 are discarded here
        # and the list is rebuilt with the *scaled probabilities*, so the
        # log-space branch never propagates past a single step.
        Fi_2 = next_fi1
        next_fi2 = []
        Slst.append(si)
        for j in next_fi1:
            next_fi2.append(j/si)
        Fi_2 = next_fi2
    # product of all scaling factors (likelihood under a scaled forward pass)
    answer2 = 1
    for s in Slst:
        answer2 = answer2*s
    print(answer2)
    # NOTE(review): Fi_2 holds scaled *probabilities* at this point, so
    # exponentiating them does not recover probabilities -- confirm intent.
    ans = 0
    for f in Fi_2:
        ans += math.exp(f)
    print(ans)
# Observation sequence: 300 die rolls, encoded as a string of faces 1-6.
X = "315116246446644245311321631164152133625144543631656626566666651166453132651245636664631636663162326455236266666625151631222555441666566563564324364131513465146353411126414626253356366163666466232534413661661163252562462255265252266435353336233121625364414432335163243633665562466662632666612355245242"
# Decode each character into its integer face value.
X = list(map(int, X))
print(len(X))
forward(X)
|
{"hexsha": "813ab8a321d21fb5863ce98b2ff1093adbdf4c4f", "size": 2114, "ext": "py", "lang": "Python", "max_stars_repo_path": "bio-info/bioinfo_6.py", "max_stars_repo_name": "kyamada101/Python", "max_stars_repo_head_hexsha": "a9be850b1818fb4784cb84e86b20cf2c61784e38", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bio-info/bioinfo_6.py", "max_issues_repo_name": "kyamada101/Python", "max_issues_repo_head_hexsha": "a9be850b1818fb4784cb84e86b20cf2c61784e38", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bio-info/bioinfo_6.py", "max_forks_repo_name": "kyamada101/Python", "max_forks_repo_head_hexsha": "a9be850b1818fb4784cb84e86b20cf2c61784e38", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1025641026, "max_line_length": 306, "alphanum_fraction": 0.5061494797, "include": true, "reason": "from scipy", "num_tokens": 691}
|
"""This class is used whenever a security evaluation job is requested. It creates the security
evaluation job starting from the parameters of the request."""
import bisect
import os
from typing import List, Union
import numpy as np
import torch
from .classification.attack_classification import AttackClassification, SUPPORTED_ATTACKS
from .dataset_loader import CustomDatasetLoader
from .model_loader import ModelLoader
# Attack algorithms selectable per perturbation norm, as
# (display name, internal attack id) pairs.
# NOTE(review): these constants are not referenced by the code visible in
# this module; presumably they are consumed by an API/UI layer -- confirm.
ATTACK_CHOICES = {
    'linf': [('PGD', 'pgd-linf'), ('Random', 'noise-linf')],
    'l2': [('PGD', 'pgd-l2'), ('CW', 'cw'), ('Random', 'noise-l2')],
}
# Preset perturbation budgets per norm, as (label, numeric value) pairs.
PERT_SIZES = {
    'linf': [("1/255", 1 / 255), ("2/255", 2 / 255), ("4/255", 4 / 255), ("8/255", 8 / 255), ("16/255", 16 / 255)],
    'l2': [("0.01", 0.01), ("0.02", 0.02), ("0.05", 0.05), ("0.1", 0.1), ("0.2", 0.2), ("0.5", 0.5)],
}
class EvaluationManager:
    """Creates and runs a security-evaluation job (robustness curve) for a
    given model/dataset pair, based on the parameters of the request."""

    def __init__(self, dataset_id: str,
                 model_id: str,
                 metric: str = None,
                 attack: str = None,
                 perturbation_values: List[Union[int, float]] = None,
                 evaluation_mode: str = 'complete',
                 task: str = 'classification',
                 indexes: List[int] = None,
                 preprocessing: dict = None,
                 attack_params: dict = None):
        """Performs security evaluation for a given model and dataset.

        :param dataset_id: Path of the dataset.
        :param model_id: Path of the model.
        :param metric: Metric to use for the evaluation. Currently,
            only `classification-accuracy` is available as metric.
        :param attack: Algorithm to use for attacking the model. Must be
            one of the keys of `SUPPORTED_ATTACKS`.
        :param perturbation_values: List of numbers containing the
            x-values for the security evaluation curve. For each point,
            a perturbation with constrained norm equal to the x-value
            will be applied.
        :param evaluation_mode: Indicates a particular configuration
            for the experiment. One of `fast`, `complete`. The `fast`
            evaluation will run the experiment on a small set of
            samples, while the complete will run it either on the
            complete dataset, or on all the samples indicated in the
            `indexes` field.
        :param task: Task performed by the classifier. Currently only
            `classification` is supported.
        :param indexes: List of indexes for specifying which samples to
            use and in what order for the evaluation. It might be
            useful for reproducing particular results or tests on a
            specific subset of samples.
        :param preprocessing: Dictionary containing the key `mean` and
            `std` for defining a preprocessing standardizer block.
        :param attack_params: Optional dictionary of extra parameters
            forwarded verbatim to the attack implementation.
        """
        self._dataset_id = dataset_id
        self._model_id = model_id
        if not os.path.isfile(self._dataset_id):
            raise ValueError("Dataset {} is not a valid path."
                             "".format(self._dataset_id))
        if not os.path.isfile(self._model_id):
            raise ValueError("Model {} is not a valid path."
                             "".format(self._model_id))
        self._task = task
        self._indexes = indexes
        self._preprocessing = preprocessing
        self._evaluation_mode = evaluation_mode
        # BUGFIX: _num_samples must always exist before _load_dataset_by_id
        # reads it; previously it was only assigned in 'fast' mode, so
        # 'complete' mode raised AttributeError. None means "use the whole
        # dataset" and is resolved to the dataset size further below.
        self._num_samples = None
        if self._evaluation_mode == 'fast':
            if self._task == 'classification':
                self._num_samples = 3
        # load dataset and model
        if dataset_id is not None:
            self._load_dataset_by_id()
        if model_id is not None:
            self._load_model_by_id()
        if attack in SUPPORTED_ATTACKS:
            self._attack_cls = attack
        else:
            raise ValueError(f"Attack type {attack} not understood. "
                             f"It should be one of: {list(SUPPORTED_ATTACKS.keys())}.")
        self._attack_params = attack_params if attack_params is not None else dict()
        self._metric = metric
        if self._metric not in ['classification-accuracy']:
            raise ValueError("Evaluation metric {} not understood. "
                             "It should be one of: 'classification-accuracy' ... ."
                             "".format(self._metric))
        # NOTE(review): this branch is effectively unreachable because the
        # check above already rejects any metric other than
        # 'classification-accuracy'. Kept for safety; the message previously
        # said "detection metric" even though this is the classification path.
        if self._task == 'classification' and self._metric != 'classification-accuracy':
            raise ValueError("Please, use 'classification-accuracy' as classification metric")
        if perturbation_values is not None:
            self._perturbation_values = perturbation_values
        else:
            # default value
            self._perturbation_values = [0, 0.01, 0.02, 0.03, 0.04, 0.05]
        # per-sample caches populated lazily by sec_eval_curve()
        self.cached_is_adv = None
        self.cached_min_distance = None
        self._batch_is_cached = None
        if self._num_samples is None:
            self._num_samples = self._validation_loader.dataset._samples.shape[0]

    def _load_dataset_by_id(self):
        """Loads the validation dataset from `_dataset_id` and derives the
        data range, input shape and number of output classes from it."""
        # Dataset can be loaded from a local file path
        data_loader = CustomDatasetLoader(path=self._dataset_id,
                                          use_case=self._task,
                                          batch_size=1,
                                          shuffle=False,
                                          num_samples=self._num_samples,
                                          indexes=self._indexes)
        self._validation_loader = data_loader.get_data()
        self.data_max, self.data_min = data_loader.validation_dataset._samples.max(), \
            data_loader.validation_dataset._samples.min()
        self.input_scale = self.data_max - self.data_min
        self.input_shape = self._validation_loader.dataset._samples[0].shape
        self.n_output_classes = len(self._validation_loader.dataset.classes)

    def _load_model_by_id(self):
        """Loads the model from `_model_id`, wiring in the dataset's input
        shape and the optional preprocessing standardizer."""
        self._model = ModelLoader(model_path=self._model_id, input_shape=self.input_shape,
                                  preprocessing=self._preprocessing).load_model()

    def prepare_attack(self):
        """Instantiates the attack object for the configured task.

        :raises ValueError: if the task has no attack implementation yet.
        """
        if self._task == 'classification':
            self.attack = AttackClassification(model=self._model, lb=self.data_min, ub=self.data_max)
        else:
            raise ValueError("Attack for task {} is not supported yet!".format(self._task))

    def sec_eval_curve(self):
        """Computes the security evaluation curve (accuracy vs. budget).

        For each perturbation budget in `_perturbation_values` the attack is
        run on the validation set and the fraction of samples that remain
        robust is recorded. Per-sample results are cached in `cached_is_adv`
        and `cached_min_distance` so that later budgets only re-attack
        samples that have not been broken yet (or, for minimum-distance
        attacks, reuse the distances computed once per batch).

        :return: response dict produced by :meth:`prepare_response`.
        """
        self.prepare_attack()
        if not isinstance(self._perturbation_values, list):
            raise ValueError("Perturbation values should "
                             "be a list of floats. Received {}"
                             "".format(self._perturbation_values))
        results = []
        batch_size = self._validation_loader.batch_size
        # one cache flag per batch; per-sample adversarial status and
        # minimal adversarial distance (inf = no adversarial found yet)
        self._batch_is_cached = [False for _ in range(len(self._validation_loader))]
        self.cached_is_adv = torch.full(size=(self._num_samples,), fill_value=False)
        self.cached_min_distance = torch.full(size=(self._num_samples,),
                                              fill_value=np.inf, dtype=torch.float64)
        for eps in self._perturbation_values:
            acc = []
            for batch_idx, (samples, labels) in enumerate(self._validation_loader):
                if self.attack.is_min_distance(self._attack_cls):
                    # Minimum-distance attacks: run once per batch and reuse
                    # the resulting distances for every subsequent eps.
                    if self._batch_is_cached[batch_idx] is False:
                        is_adv, adv_points = self.attack.run(samples, labels, self._attack_cls,
                                                             self._attack_params, eps)
                        adv_points = torch.from_numpy(adv_points)
                        is_adv = torch.from_numpy(is_adv)
                        distances = (adv_points - samples).view(adv_points.shape[0], -1).norm(
                            dim=1, p=self.attack.attack_norm(self._attack_cls))
                        # failed attacks count as "infinitely far" adversarials
                        distances[torch.logical_not(is_adv)] = np.inf
                        self.cached_is_adv[batch_idx * batch_size:
                                           min((batch_idx + 1) * batch_size, self._num_samples)] = is_adv
                        self.cached_min_distance[batch_idx * batch_size:
                                                 min((batch_idx + 1) * batch_size, self._num_samples)] = distances
                        if eps > 0:
                            self._batch_is_cached[batch_idx] = True
                else:
                    # Budget-constrained attacks: only re-attack samples that
                    # were not already adversarial at a previous budget.
                    is_adv_batch = self.cached_is_adv[batch_idx * batch_size:
                                                      min((batch_idx + 1) * batch_size, self._num_samples)]
                    distances_batch = self.cached_min_distance[batch_idx * batch_size:
                                                               min((batch_idx + 1) * batch_size, self._num_samples)]
                    not_yet_adv = torch.nonzero(torch.logical_not(is_adv_batch))
                    if len(not_yet_adv) > 0:
                        is_adv, adv_points = self.attack.run(samples[not_yet_adv, ...], labels[not_yet_adv, ...],
                                                             self._attack_cls, self._attack_params, eps)
                        is_adv = torch.from_numpy(is_adv)
                        is_adv_batch[not_yet_adv[is_adv]] = is_adv
                        adv_points = torch.from_numpy(adv_points)
                        self.cached_is_adv[batch_idx * batch_size:
                                           min((batch_idx + 1) * batch_size, self._num_samples)] = is_adv_batch
                        distances = (adv_points - samples).view(adv_points.shape[0], -1).norm(
                            dim=1, p=self.attack.attack_norm(self._attack_cls))
                        # NOTE(review): `distances` is computed from the attacked
                        # subset while `is_adv_batch` masks the full batch; this
                        # looks shape-inconsistent when only part of the batch was
                        # attacked -- confirm against the attack API.
                        distances_batch[is_adv_batch] = distances
                        self.cached_min_distance[batch_idx * batch_size:
                                                 min((batch_idx + 1) * batch_size, self._num_samples)] = distances_batch
                # a sample counts as robust if no adversarial was found, or the
                # closest adversarial lies beyond the current budget
                perf = torch.logical_or(torch.logical_not(self.cached_is_adv),
                                        torch.logical_and(self.cached_is_adv, self.cached_min_distance > eps)) \
                    .type(torch.FloatTensor).mean()
                # NOTE(review): perf spans the whole dataset but is appended
                # once per batch and then averaged, so early (partially
                # updated) snapshots are mixed into avg_acc -- probably meant
                # to be computed once after the batch loop; confirm before
                # changing the metric.
                acc.append(perf)
            avg_acc = np.array(acc).mean()
            results.append(avg_acc)
        results = np.array(results)
        response = self.prepare_response(results)
        return response

    def generate_advx(self, samples, labels, eps):
        """Runs the attack once and returns the adversarial points.

        TODO fix this function, apply new APIs: `attack.run` now also takes
        the attack class and its parameters (see sec_eval_curve).
        """
        self.prepare_attack()
        adv_points = self.attack.run(samples, labels, eps)
        return adv_points

    def prepare_response(self, performances: np.ndarray):
        """Returns the response object for a security evaluation.

        The security value is the mean performance normalised by the clean
        (first perturbation value) performance; it is then mapped onto a
        qualitative level via fixed thresholds.

        :param performances: array containing a perf value for
            each of the perturbation values
        :return: dict with `sec-level`, `sec-value` and `sec-curve` keys.
        """
        if performances[0] == 0:
            # degenerate case: no clean performance to normalise against
            sec_value = np.array(-1)
        else:
            sec_value = np.mean(performances) / performances[0]
        sec_levels = ((0.33, 0.66, 1.5), ("low", "medium", "high"))
        # compute sec-level
        sec_level = sec_levels[1][bisect.bisect_left(sec_levels[0], sec_value)]
        eval_results = {"sec-level": sec_level,
                        "sec-value": sec_value.item(),
                        "sec-curve": {
                            "x-values": ["{:.3f}".format(v) for v in self._perturbation_values],
                            "y-values": performances.tolist()}}
        return eval_results
|
{"hexsha": "547b411f84459cdc55c4e1e86a633f08597522ed", "size": 12171, "ext": "py", "lang": "Python", "max_stars_repo_path": "app/adv/evaluation_manager.py", "max_stars_repo_name": "pralab/pandavision", "max_stars_repo_head_hexsha": "7a76f333127d5cbdf5a0af5a202cec50a2041c6d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-19T15:47:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-19T15:47:57.000Z", "max_issues_repo_path": "app/adv/evaluation_manager.py", "max_issues_repo_name": "pralab/pandavision", "max_issues_repo_head_hexsha": "7a76f333127d5cbdf5a0af5a202cec50a2041c6d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app/adv/evaluation_manager.py", "max_forks_repo_name": "pralab/pandavision", "max_forks_repo_head_hexsha": "7a76f333127d5cbdf5a0af5a202cec50a2041c6d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.7125, "max_line_length": 120, "alphanum_fraction": 0.5556651056, "include": true, "reason": "import numpy", "num_tokens": 2434}
|
\documentclass[a4paper,11pt]{article}
\title{Example 3}
\author{My name}
\date{2011-01-05}
\begin{document}
\maketitle
\section{What's here}
This is our
second document.
It contains two paragraphs. The first line of a paragraph will be
indented, but not when it follows a heading.
% Here’s a comment.
ff fi fl ffi ffl -- ---
f\/f f\/i f\/l f\/f\/i f\/f\/l -\/- -\/-\/-
\end{document}
|
{"hexsha": "629f4001369e7f5114054ea0c293c06cef5fe52f", "size": 415, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Chapter02/9867_02_22.tex", "max_stars_repo_name": "eagleqian/LaTeX-Beginner-s-Guide", "max_stars_repo_head_hexsha": "49f6c9c8e0c9f7a6554e720c8a82978a5f5d1042", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-05-11T01:15:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T21:30:11.000Z", "max_issues_repo_path": "Chapter02/9867_02_22.tex", "max_issues_repo_name": "eagleqian/LaTeX-Beginner-s-Guide", "max_issues_repo_head_hexsha": "49f6c9c8e0c9f7a6554e720c8a82978a5f5d1042", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter02/9867_02_22.tex", "max_forks_repo_name": "eagleqian/LaTeX-Beginner-s-Guide", "max_forks_repo_head_hexsha": "49f6c9c8e0c9f7a6554e720c8a82978a5f5d1042", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2019-05-11T00:40:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T21:30:13.000Z", "avg_line_length": 21.8421052632, "max_line_length": 66, "alphanum_fraction": 0.6385542169, "num_tokens": 134}
|
From mathcomp Require Import ssreflect.
From Category.Base Require Import Logic Category Functor NatTran.
From Category.Instances Require Import NatTranComp FunctorCategory Product.ProductCategory.
Set Universe Polymorphism.
(* Currying *)
(* Currying of a bifunctor: fixing the first argument of
   F : ProductCat C D -> E yields a functor D -> E, and C-morphisms
   induce natural transformations between the resulting functors. *)
Module Currying.
Section Currying.
Context {C D E : Category}.
Variable F : Functor (ProductCat C D) E.
(* [Curry X] : the functor D -> E obtained by fixing the C-object X;
   on morphisms it pairs g with the identity on X. *)
Program Definition Curry (X : Obj C) : Functor D E :=
  {|
    FApp := fun Y => FApp F (pair X Y);
    FAppH := fun Y1 Y2 (g : Hom Y1 Y2)
             => FAppH F (pair (\Id X) g : @Hom (ProductCat C D) (pair X Y1) (pair X Y2))
  |}.
(* Obligation: functoriality w.r.t. composition, via FAppH_comp_eq. *)
Next Obligation.
Proof.
  rewrite <- FAppH_comp_eq.
  rewrite /=.
  rewrite Hom_IdL.
  reflexivity.
Qed.
(* Obligation: preservation of identities. *)
Next Obligation.
  rewrite Functor_id_eq.
  reflexivity.
Qed.
(* [Curry_Hom f] : a morphism f : X1 -> X2 in C induces a natural
   transformation Curry X1 ==> Curry X2, with component at Y given by
   F's action on (f, \Id Y). *)
Program Definition Curry_Hom {X1 X2 : Obj C} (f : Hom X1 X2) : NatTran (Curry X1) (Curry X2) :=
  {|
    NApp := fun Y => FAppH F (pair f (\Id Y) : @Hom (ProductCat C D) (pair X1 Y) (pair X2 Y))
  |}.
(* Obligation: naturality; both composites reduce to F's action on (f, g). *)
Next Obligation.
Proof.
  repeat rewrite <- FAppH_comp_eq.
  rewrite /=.
  repeat rewrite Hom_IdL.
  repeat rewrite Hom_IdR.
  reflexivity.
Qed.
End Currying.
End Currying.
(* [FunctorCurrying F] packages Currying.Curry / Currying.Curry_Hom into a
   functor C -> [D, E] (the functor category). *)
Program Definition FunctorCurrying {C D E : Category} (F : Functor (ProductCat C D) E) : Functor C (FunctorCategory D E) :=
  {|
    FApp := fun X => Currying.Curry F X;
    FAppH := fun X1 X2 f => Currying.Curry_Hom F f
  |}.
(* Obligation: compatibility with composition, checked componentwise via
   ToNatTranEq and functional extensionality. *)
Next Obligation.
Proof.
  apply: ToNatTranEq.
  rewrite /=.
  apply: functional_extensionality_dep.
  move => W.
  rewrite <- FAppH_comp_eq.
  rewrite /=.
  rewrite Hom_IdL.
  reflexivity.
Qed.
(* Obligation: preservation of identities, again componentwise. *)
Next Obligation.
Proof.
  apply: ToNatTranEq.
  rewrite /=.
  apply: functional_extensionality_dep.
  move => W.
  rewrite Functor_id_eq.
  reflexivity.
Qed.
(* Uncurrying *)
(* [FunctorUncurrying G] : from G : C -> [D, E] build a bifunctor
   ProductCat C D -> E.  On a morphism (f, g) it composes the target
   functor's action on g with the component of the natural transformation
   G f at the source D-object. *)
Program Definition FunctorUncurrying {C D E : Category} (G : Functor C (FunctorCategory D E))
  : Functor (ProductCat C D) E :=
  {|
    FApp := fun X => FApp (FApp G (fst X)) (snd X);
    FAppH := fun X1 X2 p =>
      FAppH (FApp G (fst X2)) (snd p) \o NApp (FAppH G (fst p)) (snd X1)
  |}.
(* Obligation: compatibility with composition.
   NOTE(review): o, h0..h2, o2, o4 are Program-generated names; the proof
   reassociates with Hom_assoc and closes with the naturality rectangle. *)
Next Obligation.
Proof.
  rewrite /=.
  repeat rewrite FAppH_comp_eq.
  rewrite (Hom_assoc (FAppH (FApp G o) h2) (FAppH (FApp G o) h0)).
  rewrite (Hom_assoc (FAppH (FApp G o) h2) (NApp (FAppH G h1) o2)).
  apply: f_equal.
  rewrite /=.
  repeat rewrite <- Hom_assoc.
  apply: (f_equal (fun a => a \o NApp (FAppH G h) o4)).
  rewrite <- NatTran_rectangle.
  reflexivity.
Qed.
(* Obligation: preservation of identities. *)
Next Obligation.
Proof.
  rewrite /=.
  rewrite Functor_id_eq.
  rewrite Hom_IdL.
  rewrite Functor_id_eq.
  rewrite /=.
  reflexivity.
Qed.
(* Currying after uncurrying is the identity on functors C -> [D, E]. *)
Lemma F_currying_uncurrying_eq {C D E : Category} :
  forall (G : Functor C (FunctorCategory D E)),
    FunctorCurrying (FunctorUncurrying G) = G.
Proof.
  move => G.
  apply: ToFunctorEq => X1 X2 f.
  unfold Hom_eq'.
  (* First establish objectwise equality of the two functors. *)
  have: (forall (X : Obj C), FApp (FunctorCurrying (FunctorUncurrying G)) X = FApp G X).
  {
    move => X.
    apply: ToFunctorEq => Y1 Y2 g.
    unfold Hom_eq'.
    rewrite /=.
    apply: eq_Hom_eq'.
    rewrite Functor_id_eq.
    rewrite /=.
    rewrite Hom_IdR.
    reflexivity.
  }
  move => Curry_eqH.
  exists (Curry_eqH X1).
  exists (Curry_eqH X2).
  rewrite /=.
  apply: ToNatTranEq.
  apply: functional_extensionality_dep.
  move => Y.
  rewrite /=.
  rewrite Functor_id_eq.
  rewrite Hom_IdL.
  (* A pointwise variant of the object equality, needed for transport. *)
  have: (forall (X : Obj C), FApp (FApp G X) Y = FApp (FApp (FunctorCurrying (FunctorUncurrying G)) X) Y).
  {
    move => X.
    move: (Curry_eqH X) =>->.
    reflexivity.
  }
  move => Curry_eqH'.
  (* Transport along the object equalities via the HomId' coercions. *)
  transitivity (NApp (HomId' (Curry_eqH X2)) Y \o (HomId' (Curry_eqH' X2)) \o NApp (FAppH G f) Y).
  {
    apply: f_equal.
    simpl in Curry_eqH'.
    rewrite HomId'_id.
    rewrite Hom_IdL.
    reflexivity.
  }
  transitivity (NApp (FAppH G f) Y \o NApp (HomId' (Curry_eqH X1)) Y \o HomId' (Curry_eqH' X1)).
  {
    rewrite (NApp_HomId'_eq (Curry_eqH X2) Y (eq_sym (Curry_eqH' X2))).
    rewrite (NApp_HomId'_eq (Curry_eqH X1) Y (eq_sym (Curry_eqH' X1))).
    rewrite /=.
    rewrite HomId'_sym_id.
    rewrite Hom_IdR.
    rewrite <- Hom_assoc.
    rewrite HomId'_sym_id.
    rewrite Hom_IdL.
    reflexivity.
  }
  simpl in Curry_eqH'.
  rewrite HomId'_id.
  rewrite Hom_IdR.
  reflexivity.
Qed.
(* Uncurrying after currying is the identity on bifunctors
   ProductCat C D -> E. *)
Lemma F_uncurrying_currying_eq {C D E : Category} :
  forall (F : Functor (ProductCat C D) E),
    FunctorUncurrying (FunctorCurrying F) = F.
Proof.
  move => F.
  apply: ToFunctorEq.
  (* Destructure the two product objects and the pair morphism. *)
  case => X1 X2.
  case => Y1 Y2.
  case.
  rewrite /=.
  move => f1 f2.
  apply eq_Hom_eq'.
  (* Both sides reduce to F's action on the pair (f1, f2). *)
  rewrite <- FAppH_comp_eq.
  rewrite /=.
  rewrite Hom_IdL.
  rewrite Hom_IdR.
  reflexivity.
Qed.
|
{"author": "k27c8ff627uxz", "repo": "category_theory", "sha": "d5568b2ba04120a4f0e5bc7f2d61297c3cf42b9e", "save_path": "github-repos/coq/k27c8ff627uxz-category_theory", "path": "github-repos/coq/k27c8ff627uxz-category_theory/category_theory-d5568b2ba04120a4f0e5bc7f2d61297c3cf42b9e/src/Instances/Product/FunctorCurrying.v"}
|
#default implementation
# Default `get_log_score` for any `Score{I}`: the natural log of the plain
# score. Registered via the `@impl` operator-implementation macro
# (NOTE(review): `@impl` semantics defined elsewhere in the package — this
# assumes it installs `get_log_score` as the fallback method; confirm).
@impl begin
    struct ScoreGetLogScore end
    # Delegates to `get_score` and takes `log`; assumes the score is
    # non-negative (log of 0 yields -Inf for floats — TODO confirm callers).
    function get_log_score(sf::Score{I}, i::I)::AbstractFloat where {I}
        return log(get_score(sf, i))
    end
end
|
{"hexsha": "b043e16e03b2e8fc4c1352d3e602c0b096265d24", "size": 190, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/sfuncs/score/score.jl", "max_stars_repo_name": "p2t2/Scruff.jl", "max_stars_repo_head_hexsha": "e6d42e1c9bb427f33d01f443ebaf3ef243fa0bde", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-30T14:00:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T14:00:24.000Z", "max_issues_repo_path": "src/sfuncs/score/score.jl", "max_issues_repo_name": "p2t2/Scruff.jl", "max_issues_repo_head_hexsha": "e6d42e1c9bb427f33d01f443ebaf3ef243fa0bde", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2022-03-29T18:18:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T21:14:28.000Z", "max_forks_repo_path": "src/sfuncs/score/score.jl", "max_forks_repo_name": "p2t2/Scruff.jl", "max_forks_repo_head_hexsha": "e6d42e1c9bb427f33d01f443ebaf3ef243fa0bde", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.1111111111, "max_line_length": 71, "alphanum_fraction": 0.6947368421, "num_tokens": 51}
|
import argparse
import os
import re
import cv2
import fnmatch
import numpy as np
from sift import SIFT
from surf import SURF
from vlad import VLAD
from vgg import VGG
from superpoint import SuperPointLocalFeature
from pca_global_descriptor import PCAGlobalDescriptor
def parse_args():
    """Parse command-line options for the image-retrieval evaluation run.

    Returns:
        argparse.Namespace with attributes ``data``, ``image_file_name``,
        ``model_dir_path`` and ``model_file_name``.
    """
    arg_parser = argparse.ArgumentParser(
        description='Training a model for image retrieval',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        '--data', type=str, default='../imgret_data/',
        help='path to directory with training images')
    arg_parser.add_argument(
        '--image-file-name', type=str, required=True,
        help='testing image file name')
    arg_parser.add_argument(
        '--model-dir-path', type=str, default='../imgret_data/',
        help='path to directory with model file')
    arg_parser.add_argument(
        '--model-file-name', type=str, default='model.npz',
        help='resulted model file name')
    return arg_parser.parse_args()
def get_test_image_file_paths(train_image_dir_path):
    """Return the full paths of all ``*.jpg`` files directly inside a directory.

    The names are sorted "naturally": digit runs compare as numbers, so
    ``2.jpg`` precedes ``10.jpg``.

    Args:
        train_image_dir_path: directory to scan (non-recursive).

    Returns:
        list of joined file paths, naturally sorted by file name.
    """
    def natural_key(name):
        # Split into digit runs and single non-digit chars; pad numbers to
        # a fixed width so lexicographic order matches numeric order.
        return ['{:10}'.format(int(tok)) if tok.isdigit() else tok
                for tok in re.findall(r'[^0-9]|[0-9]+', name)]

    candidates = fnmatch.filter(os.listdir(train_image_dir_path), '*.jpg')
    jpg_names = [name for name in candidates
                 if os.path.isfile(os.path.join(train_image_dir_path, name))]
    jpg_names.sort(key=natural_key)
    return [os.path.join(train_image_dir_path, name) for name in jpg_names]
def calc_lcluster_centers(train_image_dir_path,
                          local_feature):
    """Compute VLAD cluster centers from local descriptors of all images.

    Args:
        train_image_dir_path: directory containing the training ``*.jpg`` files.
        local_feature: local-feature extractor exposing
            ``calc_descriptors_list(paths)``.

    Returns:
        cluster centers as produced by ``VLAD.calc_lcluster_centers``.
    """
    paths = get_test_image_file_paths(train_image_dir_path)
    per_image_descriptors = local_feature.calc_descriptors_list(paths)
    # Pool every image's local descriptors into one matrix before clustering.
    pooled = np.vstack(per_image_descriptors)
    return VLAD.calc_lcluster_centers(pooled)
def calc_pca(train_image_dir_path,
             local_feature,
             global_feature):
    """Fit a 256-dimensional PCA on global descriptors of all training images.

    Args:
        train_image_dir_path: directory containing the training ``*.jpg`` files.
        local_feature: local-feature extractor exposing
            ``calc_descriptors_list(paths)``.
        global_feature: aggregator exposing ``calc_descriptor(ldescriptors=...)``.

    Returns:
        tuple ``(pca_mean, pca_eigenvectors)`` from
        ``PCAGlobalDescriptor.calc_pca``.
    """
    paths = get_test_image_file_paths(train_image_dir_path)
    ldescriptors_list = local_feature.calc_descriptors_list(paths)
    # One global descriptor per image, stacked row-wise.
    gdescriptors = np.vstack(
        [global_feature.calc_descriptor(ldescriptors=ld)
         for ld in ldescriptors_list])
    return PCAGlobalDescriptor.calc_pca(
        gdescriptors=gdescriptors,
        pca_length=256)
def main():
    """Load a trained retrieval model, then compute and print the PCA-reduced
    global descriptor of a single test image.

    Raises:
        ValueError: if the model's ``local_feature`` field names an unknown
            backend.
        FileNotFoundError: if the test image cannot be read.
    """
    args = parse_args()
    model_file_path = os.path.join(args.model_dir_path, args.model_file_name)
    model = dict(np.load(model_file_path))

    # Constructor parameters shared by every local-feature backend.
    common_kwargs = dict(
        image_size=tuple(model["image_size"]),
        keypoint_image_border_size=int(model["keypoint_image_border_size"]),
        max_keypoint_count=int(model["max_keypoint_count"]),
        ldescriptor_length=int(model["ldescriptor_length"]))

    local_feature_name = str(model["local_feature"])
    if local_feature_name == "SIFT":
        local_feature = SIFT(
            contrast_threshold=float(model["sift_contrast_threshold"]),
            edge_threshold=int(model["sift_edge_threshold"]),
            **common_kwargs)
    elif local_feature_name == "SURF":
        local_feature = SURF(
            hessian_threshold=float(model["surf_hessian_threshold"]),
            extended=bool(model["surf_extended"]),
            upright=bool(model["surf_upright"]),
            **common_kwargs)
    elif local_feature_name == "VGG":
        local_feature = VGG(
            use_scale_orientation=bool(model["vgg_use_scale_orientation"]),
            **common_kwargs)
    elif local_feature_name == "SP":
        local_feature = SuperPointLocalFeature(
            weights_path=str(model["sp_weights_path"]),
            **common_kwargs)
    else:
        raise ValueError("local_feature")

    global_feature = VLAD(lcluster_centers=model["lcluster_centers"])
    img_desc_calc = PCAGlobalDescriptor(
        local_feature=local_feature,
        global_feature=global_feature,
        pca_mean=model["pca_mean"],
        pca_eigenvectors=model["pca_eigenvectors"])

    image_file_path = os.path.join(args.data, args.image_file_name)
    image = cv2.imread(filename=image_file_path, flags=0)  # flags=0 -> grayscale
    if image is None:
        # cv2.imread silently returns None for a missing/corrupt file;
        # fail with a clear error instead of crashing inside cv2.resize.
        raise FileNotFoundError("cannot read image: %s" % image_file_path)
    image = cv2.resize(image, local_feature.image_size)
    descr = img_desc_calc.calc_descriptor(image)
    print(descr)


if __name__ == '__main__':
    main()
|
{"hexsha": "47307138e6af5ba3ceaab7e6fe709f76d6426a84", "size": 5646, "ext": "py", "lang": "Python", "max_stars_repo_path": "eval.py", "max_stars_repo_name": "osmr/imgret", "max_stars_repo_head_hexsha": "28ac6461de815e37539f1893c29d4af6d1c1647d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2018-12-29T06:58:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T07:19:54.000Z", "max_issues_repo_path": "eval.py", "max_issues_repo_name": "osmr/imgret", "max_issues_repo_head_hexsha": "28ac6461de815e37539f1893c29d4af6d1c1647d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "eval.py", "max_forks_repo_name": "osmr/imgret", "max_forks_repo_head_hexsha": "28ac6461de815e37539f1893c29d4af6d1c1647d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-03-14T13:00:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T07:19:56.000Z", "avg_line_length": 37.3907284768, "max_line_length": 114, "alphanum_fraction": 0.7065178888, "include": true, "reason": "import numpy", "num_tokens": 1228}
|
Require Import ucos_include.
Open Scope code_scope.
Open Scope Z_scope.
Open Scope int_scope.
(* Popping the head message of queue [a] (absmsgq (v::vl) -> absmsgq vl,
   same wait list) preserves the TCB-list/ECB-list relational invariant.
   The proof handles the four conjuncts of RH_TCBList_ECBList_P in turn,
   each time splitting on whether the looked-up event id equals [a]. *)
Lemma rh_tcbls_mqls_p_getmsg_hold:
forall mqls tcbls ct a v vl qmax wl,
RH_TCBList_ECBList_P mqls tcbls ct ->
EcbMod.get mqls a =
Some
(absmsgq (v:: vl) qmax, wl) ->
RH_TCBList_ECBList_P (EcbMod.set mqls a (absmsgq vl qmax, wl)) tcbls ct.
Proof.
intros.
unfold RH_TCBList_ECBList_P in *.
destruct H as (Hr1 & Hr2 & Hr3 & Hr4).
splits.
(* First conjunct. *)
destruct Hr1.
splits; intros.
destruct (tidspec.beq a eid) eqn : eq1.
unfold get in *; simpl in *.
lets Hts : tidspec.beq_true_eq eq1 .
substs.
rewrite EcbMod.set_a_get_a in H2; auto.
destruct H2; inverts H2; eauto.
rewrite EcbMod.set_a_get_a' in H2; auto.
eapply H; eauto.
destruct (tidspec.beq a eid) eqn : eq1.
lets Hts : tidspec.beq_true_eq eq1 .
substs.
rewrite EcbMod.set_a_get_a; auto.
apply H1 in H2; auto.
do 4 destruct H2.
unfold get in *; simpl in *.
rewrite H2 in H0; inverts H0.
eauto.
rewrite EcbMod.set_a_get_a'; auto.
eapply H1; eauto.
(* Second conjunct. *)
destruct Hr2.
splits; intros.
destruct (tidspec.beq a eid) eqn : eq1.
lets Hts : tidspec.beq_true_eq eq1; substs.
rewrite EcbMod.set_a_get_a in H2; auto.
destruct H2; inverts H2; eauto.
rewrite EcbMod.set_a_get_a' in H2; auto.
eapply H; eauto.
destruct (tidspec.beq a eid) eqn : eq1.
lets Hts : tidspec.beq_true_eq eq1 ; subst.
rewrite EcbMod.set_a_get_a; auto.
apply H1 in H2; auto.
do 3 destruct H2.
unfold get in *; simpl in *.
rewrite H2 in H0; inverts H0.
eauto.
rewrite EcbMod.set_a_get_a'; auto.
eapply H1; eauto.
(* Third conjunct. *)
destruct Hr3.
splits; intros.
destruct (tidspec.beq a eid) eqn : eq1.
lets Hts : tidspec.beq_true_eq eq1; substs.
rewrite EcbMod.set_a_get_a in H2; auto.
destruct H2; inverts H2; eauto.
rewrite EcbMod.set_a_get_a' in H2; auto.
eapply H; eauto.
destruct (tidspec.beq a eid) eqn : eq1.
lets Hts : tidspec.beq_true_eq eq1; substs.
rewrite EcbMod.set_a_get_a; auto.
apply H1 in H2; auto.
do 3 destruct H2.
unfold get in *; simpl in *.
rewrite H2 in H0; inverts H0.
eauto.
rewrite EcbMod.set_a_get_a'; auto.
eapply H1; eauto.
(* Fourth conjunct, including the mutex-owner side condition. *)
destruct Hr4.
splits; intros.
destruct (tidspec.beq a eid) eqn : eq1.
lets Hts : tidspec.beq_true_eq eq1; substs.
rewrite EcbMod.set_a_get_a in H2; auto.
destruct H2; inverts H2; eauto.
rewrite EcbMod.set_a_get_a' in H2; auto.
eapply H; eauto.
destruct (tidspec.beq a eid) eqn : eq1.
lets Hts : tidspec.beq_true_eq eq1; substs.
rewrite EcbMod.set_a_get_a; auto.
apply H1 in H2; auto.
do 4 destruct H2.
unfold get in *; simpl in *.
rewrite H2 in H0; inverts H0.
eauto.
rewrite EcbMod.set_a_get_a'; auto.
eapply H1; eauto.
elim H1; intros.
unfold RH_TCBList_ECBList_MUTEX_OWNER in *.
intros.
assert (eid = a \/ eid <> a) by tauto.
destruct H5; intros.
subst eid.
rewrite EcbMod.set_a_get_a in H4.
inversion H4.
apply CltEnvMod.beq_refl.
rewrite EcbMod.set_a_get_a' in H4.
eapply H3; eauto.
apply tidspec.neq_beq_false; auto.
Qed.
Open Scope int_scope.
(* If the concrete queue's entry count [qens] is not greater than 0, then
   the related abstract event must be an empty message queue. *)
Lemma msgqnode_p_nomsg:
forall qptr qst qend qin qout size qens qblk mblk ml A l,
Int.ltu ($ 0) qens = false ->
RLH_ECBData_P
(
DMsgQ l (qptr
:: qst
:: qend
:: qin
:: qout
:: Vint32 size :: Vint32 qens :: Vptr qblk :: nil)
mblk ml) A ->
exists qmax wl, A = (absmsgq nil qmax,wl).
Proof.
intros.
unfolds in H0.
destruct A.
destruct e; tryfalse.
simp join.
funfold H0.
funfold H1.
funfold H2.
(* qens = 0, hence the abstract message list has length 0, i.e. nil. *)
lets Heq : int_ltu_false_eq0 H.
rewrite Heq in H1.
apply eq_sym in H1.
apply zof_nat_eq_zero_zero in H1.
apply length_zero_nil in H1.
subst l0.
eauto.
Qed.
(* Dequeue step at the wrap-around point: when the out pointer's successor
   equals the queue end, advancing out resets it to the queue start [qst]
   and decrements the entry count; the concrete/abstract relation
   RLH_ECBData_P is preserved with the head message removed. *)
Lemma msgqnode_p_hold_end:
forall b sti qens outi qend qptr qst qin qsize qblk mblk ml hml hsize wl l,
id_addrval qblk msgqueuetbl os_ucos_h.OS_Q_FREEBLK = Some (b, sti) ->
Int.ltu ($ 0) qens =true ->
qend = Vptr (b, (outi +ᵢ Int.mul ($ 1) ($ 4))) ->
WellformedOSQ
(qptr
:: qst
:: qend
:: qin
:: Vptr (b, outi)
:: Vint32 qsize :: Vint32 qens :: Vptr qblk :: nil) ->
RLH_ECBData_P
(DMsgQ l
(qptr
:: qst
:: qend
:: qin
:: Vptr (b, outi)
:: Vint32 qsize :: Vint32 qens :: Vptr qblk :: nil)
mblk ml)
(absmsgq (nth_val' (Z.to_nat ((Int.unsigned outi - Int.unsigned sti) / 4)) ml
:: hml) hsize, wl) ->
RLH_ECBData_P
(DMsgQ l
(qptr
:: qst
:: qend
:: qin
:: qst
:: Vint32 qsize
:: Vint32 (qens -ᵢ $ 1) :: Vptr qblk :: nil)
mblk ml ) (absmsgq hml hsize, wl).
Proof.
introv Hid Hint Hqend Hwl Hrl .
funfold Hwl.
destruct Hrl.
destruct H0 as (Hm1 & Hm2 & Hm3 & Hecb).
funfold H.
funfold Hm1.
funfold Hm2.
unfold arrayelem_addr_right in *.
unfold qend_right, ptr_offset_right,ptr_minus, distance in *.
simp join.
fsimpl.
simpl in *.
destruct x5.
apply eq_sym in H5.
subst i0.
inverts Hid.
remember ( i1+ᵢ($ 4+ᵢInt.zero)) as k.
inverts H5.
(* Normalize the constant offsets: 1*4 = 4 and |4| = 4. *)
assert (Int.mul ($ 1) ($ 4) = $ 4).
clear -i.
mauto.
rewrite H5 in *.
rewrite <- H4 in *.
rewrite Int.repr_unsigned in *.
assert (Int.unsigned ($ 4) = 4)%Z.
clear -i.
mauto.
rewrite H8 in *.
clear H5 H8.
splits.
unfolds.
do 7 eexists;splits; try solve [ unfolds; simpl; eauto].
assert (Int.unsigned (Int.divu ($ 4-ᵢ$ 4) ($ 4)) = 0)%Z.
clear.
int auto.
int auto.
rewrite Zdiv_0_l.
omega.
simpl.
rewrite Zdiv_0_l.
omega.
rewrite H5 in *.
clear H5.
splits.
splits.
introv Hlt.
lets Hed : math_le_xyz H1 H H21; eauto.
rewrite Int.repr_unsigned; eauto.
destruct Hed as (Has & Hass & Hsd) .
apply z_split in Has.
destruct Has as [Has1 | Has2].
lets Hef : int_ltu_eq_false Hint.
apply H15 in Has1.
destruct Has1.
rewrite Hef in H5.
destruct H5; tryfalse.
destruct H5 as (Hs1 & Hs2).
rewrite <- Hass in Hs2.
remember ( ∘(Int.unsigned (Int.divu (outi-ᵢ$ 4) ($ 4)))) as N.
remember (∘(Int.unsigned (Int.divu (i-ᵢ$ 4) ($ 4)))) as M.
destruct H13 as (His & Hsi).
lets Hlss : isptr_length_nth His.
rewrite vallist_seg_Sn in Hs2.
simpl in Hs2.
inverts Hs2; auto.
rewrite Int.unsigned_repr in Hsd.
rewrite <- Hsd in Hlss.
auto.
clear -N.
mauto.
apply H14 in Has2.
rewrite <- Hass in Has2.
rewrite vallist_seg_Sn in Has2.
simpl in Has2.
inverts Has2; auto.
rewrite Int.unsigned_repr in Hsd.
rewrite Hsd .
destruct H13 as (His & Hsi).
lets Hlss : isptr_length_nth His.
auto.
clear -l.
mauto.
introv Hf.
assert ( 0 <= Int.unsigned (Int.divu (i-ᵢ$ 4) ($ 4)) )%Z.
apply Int.unsigned_range.
false.
omega.
introv Hf.
lets Hed : math_le_xyz H1 H H21; eauto.
rewrite Int.repr_unsigned; auto.
rewrite Hf in *.
assert (0<=Int.unsigned (Int.divu (outi-ᵢ$ 4) ($ 4)) )%Z as Has.
apply Int.unsigned_range.
assert (0<Int.unsigned (Int.divu (outi-ᵢ$ 4) ($ 4)) \/
0=Int.unsigned (Int.divu (outi-ᵢ$ 4) ($ 4)) )%Z as Hc.
omega.
destruct Hc as [Hc1 | Hc2].
apply H14 in Hc1.
destruct Hed as (He1 & He2 & He3).
rewrite vallist_seg_eqnil in *.
rewrite <- He2 in Hc1.
rewrite vallist_seg_Sn in Hc1.
simpl in Hc1.
inverts Hc1.
left.
splits; auto.
simpl in H0.
clear - H0.
mauto.
destruct H13 as (His & Hsi).
lets Hlss : isptr_length_nth His.
rewrite He3.
rewrite Int.unsigned_repr.
auto.
clear -l.
mauto.
apply H15 in Hc2.
left.
destruct Hc2.
destruct H5.
inverts H8.
destruct H5.
destruct Hed as (He1 & He2 & He3).
rewrite vallist_seg_eqnil in *.
rewrite <- He2 in H8.
rewrite vallist_seg_Sn in H8.
simpl in H8.
inverts H8.
simpl in H0.
split; auto.
clear - H0.
mauto.
destruct H13 as (His & Hsi).
lets Hlss : isptr_length_nth His.
rewrite He3.
rewrite Int.unsigned_repr.
auto.
clear -l.
mauto.
destruct H13.
auto.
unfolds.
eexists.
splits.
unfolds; simpl; auto.
eapply int_list_length_dec; auto.
unfolds.
eexists.
splits; try unfolds; simpl; eauto.
rewrite Int.repr_unsigned; auto.
intros.
eapply Hm3.
introv Hf; tryfalse.
intros.
apply Hecb in H5.
tryfalse.
Qed.
(* Dequeue step without wrap-around: advancing the out pointer by one slot
   (4 bytes) and decrementing the entry count preserves RLH_ECBData_P
   with the head message removed from the abstract queue. *)
Lemma msgqnode_p_hold_no_end:
forall b sti qens outi qend qptr qst qin qsize qblk mblk ml hml hsize wl l,
id_addrval qblk msgqueuetbl os_ucos_h.OS_Q_FREEBLK = Some (b, sti) ->
length ml = ∘OS_MAX_Q_SIZE ->
WellformedOSQ
(qptr
:: qst
:: qend
:: qin
:: Vptr (b, outi)
:: Vint32 qsize :: Vint32 qens :: Vptr qblk :: nil) ->
RLH_ECBData_P
(DMsgQ l
(qptr
:: qst
:: qend
:: qin
:: Vptr (b, outi)
:: Vint32 qsize :: Vint32 qens :: Vptr qblk :: nil)
mblk ml)
(absmsgq (nth_val' (Z.to_nat ((Int.unsigned outi - Int.unsigned sti) / 4)) ml
:: hml) hsize, wl) ->
RLH_ECBData_P
(DMsgQ l
(qptr
:: qst
:: qend
:: qin
:: Vptr (b, outi +ᵢ Int.mul ($ 1) ($ 4))
:: Vint32 qsize
:: Vint32 (qens -ᵢ $ 1) :: Vptr qblk :: nil)
mblk ml ) (absmsgq hml hsize, wl).
Proof.
introv Hid Hlen Hwl Hrl .
funfold Hwl.
destruct Hrl.
destruct H0 as (Hm1 & Hm2 & Hm3 & Hecb).
funfold H.
funfold Hm1.
funfold Hm2.
unfold arrayelem_addr_right in *.
unfold qend_right, ptr_offset_right,ptr_minus, distance in *.
simp join.
fsimpl.
simpl in *.
destruct x5.
apply eq_sym in H5.
subst i0.
inverts Hid.
remember ( i2+ᵢ($ 4+ᵢInt.zero)) as k.
inverts H5.
(* Normalize the constant offsets: 1*4 = 4 and |4| = 4. *)
assert (Int.mul ($ 1) ($ 4) = $ 4).
clear -i.
mauto.
rewrite H5 in *.
rewrite <- H4 in *.
rewrite Int.repr_unsigned in *.
assert (Int.unsigned ($ 4) = 4).
clear -i.
mauto.
rewrite H8 in *.
clear H5 H8.
splits.
unfolds.
do 7 eexists;splits; try solve [ unfolds; simpl; eauto].
splits.
splits.
introv Hlt.
lets Hed : math_xyz_prop2' H1 H Hlt; eauto.
destruct Hed as (Has & Hass & Hsd) .
lets Hls : H12 Has.
assert ( Int.unsigned ($ 4) = 4).
clear -i.
mauto.
rewrite H5 in *.
clear H5.
rewrite <- Hsd in Hls.
rewrite <- Hass .
eapply vallistseg_n_m_split; eauto.
unfold nat_of_Z.
apply le_change.
eapply Z2Nat.inj_le; try omega;try apply Int.unsigned_range.
destruct H13.
rewrite Hlen.
unfold OS_MAX_Q_SIZE in H11.
unfold Pos.to_nat.
simpl.
eapply math_le_trans; eauto.
introv Hf.
lets Heas :math_xyz_prop3' Hf; eauto.
destruct Heas as (Hdisj & Hseq & He).
assert ( Int.unsigned ($ 4) = 4).
clear -i.
mauto.
rewrite H5 in *.
clear H5.
rewrite <- Hseq in *.
rewrite <- He in *.
destruct Hdisj as [Hd1 | Hd2].
lets Hls : H14 Hd1.
eapply vallist_seg_ltunm_prop; eauto.
eapply list_maxsize_le; eauto.
lets Hrs : H15 Hd2.
destruct Hrs.
destruct H5 as (Hf1 & Hf2).
inverts Hf2.
destruct H5 as (Hf1 & Hf2).
rewrite Hd2 in *.
eapply vallist_seg_ltunm_prop; eauto.
eapply list_maxsize_le; eauto.
introv Hf.
lets Heas :math_xyz_prop4' Hf; eauto.
destruct Heas as (Hdisj & Hseq & He).
assert ( Int.unsigned ($ 4) = 4).
clear -i.
mauto.
rewrite H5 in *.
clear H5.
rewrite Hf.
rewrite <- Hseq in *.
rewrite <- He in *.
lets Hres : H12 Hdisj.
rewrite Hf in Hres.
rewrite <- Hseq in Hres.
rewrite vallist_seg_Sn in Hres.
inverts Hres.
left.
simpl in H0.
splits; auto.
clear - H0.
mauto.
rewrite Hseq.
rewrite <- Hf.
rewrite Hlen.
unfold OS_MAX_Q_SIZE in H11.
unfold Pos.to_nat.
simpl.
eapply math_le_trans; eauto.
destruct H13; auto.
unfolds.
eexists.
splits.
unfolds; simpl; eauto.
eapply int_list_length_dec; auto.
unfolds.
eexists.
splits; try unfolds; simpl; eauto.
rewrite Int.repr_unsigned; auto.
intros.
eapply Hm3.
introv Hf; tryfalse.
intros.
apply Hecb in H5.
tryfalse.
Qed.
(* A non-empty related queue's abstract head equals the concrete message at
   the out pointer's slot: hml must have the form (absmsgq (m :: vl') max, wl)
   where m is the entry of vl indexed by (outi - sti)/4. *)
Lemma h_has_same_msg:
forall b qptr qst qend qin outi qsize qens qblk sti l qblkm vl hml,
Int.ltu ($ 0) qens = true ->
id_addrval qblk msgqueuetbl os_ucos_h.OS_Q_FREEBLK = Some (b, sti) ->
length vl = ∘OS_MAX_Q_SIZE ->
WellformedOSQ
(qptr
:: qst
:: qend
:: qin
:: Vptr (b, outi)
:: qsize :: Vint32 qens :: Vptr qblk:: nil) ->
RLH_ECBData_P (DMsgQ l
(qptr
:: qst
:: qend
:: qin
:: Vptr (b, outi)
:: qsize :: Vint32 qens :: Vptr qblk:: nil)
qblkm vl) hml ->
exists vl' max wl, hml = (absmsgq ((nth_val' (Z.to_nat ((Int.unsigned outi - Int.unsigned sti) / 4)) vl) :: vl') max , wl).
Proof.
introv Hint Hid Hlen Hwl Hrl .
funfold Hwl.
unfold arrayelem_addr_right in *.
unfold qend_right, ptr_offset_right,ptr_minus, distance in *.
simp join.
fsimpl.
simpl in *.
destruct x5.
apply eq_sym in Hid.
inverts Hid.
remember ( i2+ᵢ($ 4+ᵢInt.zero)) as k.
inverts H5.
destruct hml.
destruct e; tryfalse.
destruct Hrl as (Hm1 & Hm2 & Hm3).
destruct Hm3 as (Hm3 & _).
funfold Hm1.
funfold Hm2.
funfold Hm3.
unfold distance in *.
simpl in *.
rewrite Int.repr_unsigned in *.
lets Hes : int_nat_ltu_lt H7 Hint.
lets Hls : list_length_lt Hes.
destruct Hls as (xx & ll & lt).
subst l0.
(* Case split on in-pointer vs. out-pointer slot ordering. *)
assert ( Int.unsigned (Int.divu (i0-ᵢ$ 4) ($ 4)) >
Int.unsigned (Int.divu (outi-ᵢ$ 4) ($ 4)) \/
Int.unsigned (Int.divu (i0-ᵢ$ 4) ($ 4)) <
Int.unsigned (Int.divu (outi-ᵢ$ 4) ($ 4)) \/
Int.unsigned (Int.divu (i0-ᵢ$ 4) ($ 4)) =
Int.unsigned (Int.divu (outi-ᵢ$ 4) ($ 4)) ) by omega.
destruct H5 as [Ha1 | [Ha2 | Ha3] ].
apply H19 in Ha1.
lets Has : vallist_seg_prop_eq Ha1.
lets Heq : math_out_start_eq' H10 H2 H11; eauto.
rewrite <- Heq.
subst xx.
do 3 eexists; eauto.
apply H21 in Ha2.
lets Heq : math_out_start_eq' H10 H2 H11; eauto.
rewrite <- Heq.
remember (vallist_seg 0 ∘(Int.unsigned (Int.divu (i0-ᵢ$ 4) ($ 4))) vl) as ls.
assert ( (∘(Int.unsigned m) <= length vl)%nat).
eapply list_maxsize_le; eauto.
lets Hdas: vallist_seg_prop H2 H5.
lets Hsp : list_append_split Hdas Ha2.
simp join.
rewrite app_comm_cons in Ha2.
apply app_inv_tail in Ha2.
lets Has : vallist_seg_prop_eq Ha2.
subst xx.
do 3 eexists; eauto.
apply H22 in Ha3.
destruct Ha3.
simp join; tryfalse.
destruct H5.
lets Heq : math_out_start_eq' H10 H2 H11; eauto.
rewrite <- Heq.
remember (vallist_seg 0 ∘(Int.unsigned (Int.divu (i0-ᵢ$ 4) ($ 4))) vl) as ls.
assert ( (∘(Int.unsigned m) <= length vl)%nat).
eapply list_maxsize_le; eauto.
lets Hdas: vallist_seg_prop H2 H16.
lets Hsp : list_append_split Hdas H9.
simp join.
rewrite app_comm_cons in H9.
apply app_inv_tail in H9.
lets Has : vallist_seg_prop_eq H9.
subst xx.
do 3 eexists; eauto.
Qed.
(* Well-formedness is preserved by the wrap-around dequeue update: when the
   out pointer would pass the end, it is reset to the start [st] and the
   entry count is decremented via val_inj/memory.sub. *)
Lemma get_wellformedosq_end:
forall x qptr st b i qin qout size ens qfr,
ens = Vint32 x ->
Int.ltu Int.zero x = true ->
WellformedOSQ
(qptr
:: st
:: Vptr (b, i +ᵢ Int.mul ($ 1) ($ 4))
:: qin :: qout :: size :: ens :: qfr :: nil) ->
WellformedOSQ
(qptr
:: st
:: Vptr (b, i +ᵢ Int.mul ($ 1) ($ 4))
:: qin
:: st
:: size
:: val_inj (memory.sub ens (Vint32 (Int.repr 1)) Tint16)
:: qfr :: nil).
Proof.
intros.
unfold WellformedOSQ in *.
simp join.
unfold V_OSQEnd in *.
unfold V_OSQSize in *.
unfold V_OSQStart in *.
unfold V_OSQOut in *.
unfold V_qfreeblk in *.
simpl in *.
unfold V_OSQIn in *;simpl in *.
inverts H3.
inverts H4.
inverts H5.
inverts H6.
inverts H1.
do 7 eexists;splits;eauto.
splits;auto.
unfolds.
(* The new out pointer sits at slot index 0 (queue start). *)
exists 0%nat.
inverts H2.
splits;try unfolds;simpl;auto.
destruct x5.
rewrite Pos.eqb_refl.
rewrite Int.sub_idem.
unfold Int.modu.
rewrite Int.unsigned_repr.
unfold Int.zero.
rewrite Int.unsigned_repr.
assert (0 mod 4 = 0).
unfold Z.modulo.
unfold Z.div_eucl.
auto.
rewrite H.
split.
unfolds.
remember (zlt (Int.unsigned i0) (Int.unsigned i0)) as X.
destruct X;auto.
omega.
auto.
rewrite max_unsigned_val.
omega.
rewrite max_unsigned_val.
omega.
destruct x5.
rewrite Pos.eqb_refl.
simpl.
assert ((Int.unsigned (Int.divu (i0 -ᵢ i0) ($ 4))) = 0).
rewrite Int.sub_idem.
unfold Int.divu.
unfold Int.zero.
rewrite Int.unsigned_repr.
rewrite Int.unsigned_repr.
rewrite Int.unsigned_repr.
auto.
rewrite max_unsigned_val.
omega.
rewrite max_unsigned_val.
omega.
rewrite Int.unsigned_repr.
rewrite Int.unsigned_repr.
assert (0/4 =0).
auto.
rewrite H.
rewrite max_unsigned_val.
omega.
rewrite max_unsigned_val.
omega.
rewrite max_unsigned_val.
omega.
destruct x6.
rewrite H.
simpl.
auto.
clear -H8.
remember (Int.unsigned x4) as X.
clear -H8.
unfold nat_of_Z.
assert (Z.to_nat 1 = 1%nat).
simpl; auto.
rewrite <- H.
eapply Z2Nat.inj_le; try omega.
Qed.
(* Well-formedness is preserved by the non-wrap-around dequeue update: the
   out pointer advances one slot (and does not reach the end pointer) and
   the entry count is decremented via val_inj/memory.sub. *)
Lemma get_wellformedosq_end':
forall x qptr st b i qin size ens qfr qend,
ens = Vint32 x ->
qend <> Vptr (b, i +ᵢ Int.mul ($ 1) ($ 4)) ->
WellformedOSQ
(qptr
:: st
:: qend
:: qin :: Vptr (b, i) :: size :: ens :: qfr :: nil) ->
WellformedOSQ
(qptr
:: st
:: qend
:: qin
:: Vptr (b, i +ᵢ Int.mul ($ 1) ($ 4))
:: size
:: val_inj (memory.sub ens (Vint32 (Int.repr 1)) Tint16)
:: qfr :: nil).
Proof.
introv Hens Hq.
intros.
funfold H.
unfold WellformedOSQ in *.
do 7 eexists;splits;eauto;try unfolds; simpl; eauto.
splits;auto.
unfolds.
unfold qend_right in *.
unfold arrayelem_addr_right in *.
destruct H9.
destruct H10.
unfold ptr_offset_right in *.
unfold ptr_minus in *.
destruct x1.
destruct x2.
destruct x5.
destruct x6.
simpl in H5.
inverts H5.
(* Identify the three block names via the positive equality tests. *)
remember ((b =? b2)%positive ) as Hbool.
destruct Hbool; simp join; tryfalse.
rewrite Pos2Z.inj_eqb in HeqHbool.
apply eq_sym in HeqHbool.
apply Z.eqb_eq in HeqHbool.
inverts HeqHbool.
remember ((b1 =? b2)%positive ) as Hbool.
destruct Hbool; simp join; tryfalse.
rewrite Pos2Z.inj_eqb in HeqHbool.
apply eq_sym in HeqHbool.
apply Z.eqb_eq in HeqHbool.
inverts HeqHbool.
remember ((b0 =? b2)%positive ) as Hbool.
destruct Hbool; simp join; tryfalse.
rewrite Pos2Z.inj_eqb in HeqHbool.
apply eq_sym in HeqHbool.
apply Z.eqb_eq in HeqHbool.
inverts HeqHbool.
simpl in *.
rewrite H7 in *.
clear H7.
assert (Int.mul ($ 1) ($ 4) = $ 4 ).
clear - i.
int auto.
rewrite H7 in *.
exists ( ∘(Int.unsigned (Int.divu ((i+ᵢ$ 4)-ᵢ$ 4) ($ 4)))).
invertsall.
assert (i0 <> i+ᵢ$ 4).
intro Hf.
apply Hq.
subst .
auto.
lets (Hrs1 & Hrs2 & Hrs3) : math_lt_mod_lt H0 H3 H1 H11; eauto.
splits;eauto.
rewrite H4; auto.
Qed.
(* Compose an ECBList_P predicate from a prefix list, one message-queue
   event node (with its R_ECB_ETbl_P and RLH_ECBData_P facts), and a
   suffix list, given the corresponding EcbMod joins. *)
Lemma msgqlist_p_compose:
forall p qid mqls qptrl1 qptrl2 i i1 a x3 p' v'41
msgqls1 msgqls2 msgq mqls1 mqls2 mq mqls' tcbls,
R_ECB_ETbl_P qid
(V$OS_EVENT_TYPE_Q
:: Vint32 i :: Vint32 i1 :: Vptr a :: x3 :: p' :: nil, v'41) tcbls ->
ECBList_P p (Vptr qid) qptrl1 msgqls1 mqls1 tcbls->
ECBList_P p' Vnull qptrl2 msgqls2 mqls2 tcbls->
RLH_ECBData_P msgq mq ->
EcbMod.joinsig qid mq mqls2 mqls'->
EcbMod.join mqls1 mqls' mqls ->
ECBList_P p Vnull (qptrl1 ++ ((V$OS_EVENT_TYPE_Q
:: Vint32 i :: Vint32 i1 :: Vptr a :: x3 :: p' :: nil, v'41)::nil) ++ qptrl2) (msgqls1 ++ (msgq::nil) ++msgqls2) mqls tcbls.
Proof.
intros.
simpl.
(* Delegate to the generic composition lemma; the middle singleton is
   discharged by exhibiting the event node directly. *)
eapply ecblist_p_compose; eauto.
simpl.
eexists; splits; eauto.
do 3 eexists; splits; eauto.
unfolds; simpl; auto.
Qed.
(* In a well-formed OS queue whose free-block pointer lives in block b, the
   out pointer must point into the same block b (at some offset i'). *)
Lemma osq_same_blk_st_out'
: forall (qptr qst qend qin qout qsz qen : val)
(b : block) (i : int32),
WellformedOSQ
(qptr
:: qst :: qend :: qin :: qout :: qsz :: qen :: Vptr (b,i) :: nil) ->
exists i', qout = Vptr (b, i').
Proof.
introv Hwf.
funfold Hwf.
simpl in H5.
funfold H9.
funfold H10.
unfold ptr_offset_right in *.
unfold ptr_minus in *.
inverts H5.
unfolds in H8.
simpl in *.
simp join.
rewrite H5 in *.
destruct x1.
destruct x2.
(* Chase the block-equality tests down to b. *)
remember ((b0 =? b)%positive ) as Hbool.
destruct Hbool; tryfalse.
simp join.
rewrite Pos2Z.inj_eqb in HeqHbool.
apply eq_sym in HeqHbool.
apply Z.eqb_eq in HeqHbool.
apply eq_sym in HeqHbool.
inverts HeqHbool.
remember ((b1 =? b0)%positive ) as Hbool.
destruct Hbool; tryfalse.
simp join.
rewrite Pos2Z.inj_eqb in HeqHbool.
apply eq_sym in HeqHbool.
apply Z.eqb_eq in HeqHbool.
inverts HeqHbool.
eauto.
Qed.
(* Arithmetic facts about a well-formed, non-empty related queue: the out
   offset x is past the 4-byte header, 4-aligned relative to it, its slot
   index is < 20 (= OS_MAX_Q_SIZE) and < length v2, and the message stored
   at that slot is a well-typed void pointer. *)
Lemma wellq_props:
forall x12 x11 x5 x6 v'49 x i2 i1 v'47 x13 x14 v2 v'46,
length v2 = ∘OS_MAX_Q_SIZE ->
Int.ltu ($ 0) i1 = true ->
RLH_ECBData_P
(DMsgQ (Vptr (v'47, Int.zero))
(x12
:: x11
:: x5
:: x6
:: Vptr (v'49, x)
:: Vint32 i2
:: Vint32 i1 :: Vptr (v'49, Int.zero) :: nil)
(x13 :: x14 :: nil) v2) v'46 ->
WellformedOSQ
(x12
:: x11
:: x5
:: x6
:: Vptr (v'49, x)
:: Vint32 i2
:: Vint32 i1 :: Vptr (v'49, Int.zero) :: nil) ->
Int.unsigned (Int.zero+ᵢ($ 4+ᵢInt.zero)) <= Int.unsigned x /\
4 * ((Int.unsigned x - Int.unsigned (Int.zero+ᵢ($ 4+ᵢInt.zero))) / 4) =
Int.unsigned x - Int.unsigned (Int.zero+ᵢ($ 4+ᵢInt.zero)) /\
(Int.unsigned x - Int.unsigned (Int.zero+ᵢ($ 4+ᵢInt.zero))) / 4 < 20 /\
(Int.unsigned x - Int.unsigned (Int.zero+ᵢ($ 4+ᵢInt.zero))) / 4 < Z.of_nat (length v2) /\
rule_type_val_match (Void) ∗
(nth_val'
(Z.to_nat
((Int.unsigned x - Int.unsigned (Int.zero+ᵢ($ 4+ᵢInt.zero))) / 4))
v2) = true.
Proof.
introv Hlen Hint Hrl Hwf.
(* Local helper so the whole conclusion can be asserted as one hypothesis. *)
Definition auxand P Q := P /\ Q.
assert (auxand (Int.unsigned (Int.zero+ᵢ($ 4+ᵢInt.zero)) <= Int.unsigned x)
(4 * ((Int.unsigned x - Int.unsigned (Int.zero+ᵢ($ 4+ᵢInt.zero))) / 4) =
Int.unsigned x - Int.unsigned (Int.zero+ᵢ($ 4+ᵢInt.zero)) /\
(Int.unsigned x - Int.unsigned (Int.zero+ᵢ($ 4+ᵢInt.zero))) / 4 < 20 /\
(Int.unsigned x - Int.unsigned (Int.zero+ᵢ($ 4+ᵢInt.zero))) / 4 <
Z.of_nat (length v2) /\
rule_type_val_match (Void) ∗
(nth_val'
(Z.to_nat
((Int.unsigned x - Int.unsigned (Int.zero+ᵢ($ 4+ᵢInt.zero))) / 4))
v2) = true
)).
funfold Hwf.
unfold arrayelem_addr_right in *.
unfold qend_right, ptr_offset_right,ptr_minus, distance in *.
simp join.
fsimpl.
inverts H5.
unfolds in Hrl.
destruct v'46.
destruct e; tryfalse.
destruct Hrl as (Hm1 & Hm2 & Hm3).
destruct Hm3 as (Hm3 & _).
funfold Hm1.
funfold Hm2.
funfold Hm3.
unfold distance in *.
simpl in H7, H10, H18, H20, H21.
rewrite Int.repr_unsigned in *.
(* Normalize the header offset 0 + (4 + 0) to the constant 4. *)
assert ((Int.zero+ᵢ($ 4+ᵢInt.zero)) = $4).
clear -x.
mauto.
rewrite H1 in *.
clear H1.
assert (Int.unsigned ($ 4) =4).
clear -x.
mauto.
rewrite H1 in *.
clear H1.
splits.
clear -H.
int auto.
apply eq_sym.
eapply Z_div_exact_2; try omega.
eapply math_prop_int_modu; eauto.
eapply math_prop_ltu_20; eauto.
rewrite Hlen.
unfold OS_MAX_Q_SIZE.
simpl.
eapply math_prop_ltu_20; eauto.
assert (Int.unsigned ($4) = 4).
clear -x.
mauto.
(* Trichotomy on the in-slot vs. out-slot indices. *)
assert ( Int.unsigned (Int.divu (i0-ᵢ$ 4) ($ 4)) >
Int.unsigned (Int.divu (x-ᵢ$ 4) ($ 4)) \/
Int.unsigned (Int.divu (i0-ᵢ$ 4) ($ 4)) <
Int.unsigned (Int.divu (x-ᵢ$ 4) ($ 4)) \/
Int.unsigned (Int.divu (i0-ᵢ$ 4) ($ 4)) =
Int.unsigned (Int.divu (x-ᵢ$ 4) ($ 4))) by omega.
lets Hes : int_nat_ltu_lt H5 Hint.
lets Hls : list_length_lt Hes.
destruct Hls as (xx & ll & lt).
subst l.
destruct H9 as [Ha1 | [ Ha2 | Ha3]].
apply H18 in Ha1.
lets Has : vallist_seg_prop_eq Ha1.
lets Heq : math_out_start_eq' H11; eauto.
rewrite H1 in *.
rewrite <- Heq.
rewrite <- Has.
simpl in H19.
destruct H19.
simpl.
clear - H9.
pauto.
apply H20 in Ha2.
lets Heq : math_out_start_eq' H11; eauto.
rewrite H1 in *.
rewrite <- Heq.
remember (vallist_seg 0 ∘(Int.unsigned (Int.divu (i0-ᵢ$ 4) ($ 4))) v2) as ls.
assert ( (∘( ( Int.unsigned m)) <= length v2)%nat).
eapply list_maxsize_le; eauto.
lets Hdas: vallist_seg_prop H2 H9; eauto.
lets Hsp : list_append_split Hdas Ha2.
simp join.
simpl in H19.
destruct H19.
apply vallist_seg_prop_eq in H16.
rewrite <- H16.
simpl.
clear -H7.
pauto.
apply H21 in Ha3.
lets Heq : math_out_start_eq' H11; eauto.
destruct Ha3 as [Haa | Ha3].
simp join; tryfalse.
destruct Ha3 as (Heqa & Ha3).
rewrite H1 in *.
rewrite <- Heq.
remember (vallist_seg 0 ∘(Int.unsigned (Int.divu (i0-ᵢ$ 4) ($ 4))) v2) as ls.
assert ( (∘( ( Int.unsigned m)) <= length v2)%nat).
eapply list_maxsize_le; eauto.
lets Hdas: vallist_seg_prop H2 H9; eauto.
lets Hsp : list_append_split Hdas Ha3.
simp join.
simpl in H19.
destruct H19.
apply vallist_seg_prop_eq in H16.
rewrite <- H16.
simpl.
clear -H7.
pauto.
auto.
Qed.
Close Scope code_scope.
Close Scope int_scope.
Close Scope Z_scope.
(*
Lemma qacc_absinfer_no_msg:
forall P mqls x qmaxlen wl,
can_change_aop P ->
EcbMod.get mqls x = Some (absmsgq nil qmaxlen,wl) ->
absinfer
(<|| qacc (Vptr x :: nil) ||> ** HECBList mqls ** P)
(<|| END Some Vnull ||> ** HECBList mqls ** P).
Proof.
infer_solver 2%nat.
Qed.
Lemma qacc_absinfer_get_msg:
forall P mqls x qmaxlen wl v vl v1 v3 v4 ,
can_change_aop P ->
EcbMod.get mqls x = Some (absmsgq (v::vl) qmaxlen,wl) ->
absinfer
(<|| qacc (Vptr x :: nil) ||> **
HECBList mqls ** HTCBList v1 ** HTime v3 ** HCurTCB v4 ** P)
(<|| END Some v ||> ** HECBList (EcbMod.set mqls x (absmsgq vl qmaxlen,wl)) ** HTCBList v1 ** HTime v3 ** HCurTCB v4 ** P).
Proof.
infer_solver 3%nat.
Qed.
Lemma qacc_absinfer_null:
forall P, can_change_aop P ->
absinfer (<|| qacc (Vnull :: nil) ||> ** P) (<|| END (Some Vnull) ||> ** P).
Proof.
intros.
unfold qacc.
eapply absinfer_trans; auto.
eapply absinfer_choice1; eauto.
eapply absinfer_prim; auto.
unfolds; intros.
destruct s as [[[[]]]].
simpl in H0; mytac.
exists O (END Some Vnull).
split.
eapply hmstep_merge_emp.
constructors.
unfolds; auto.
apply eqdomO_same.
apply eqtid_same.
unfolds; intros; rewrite OSAbstMod.emp_sem.
destruct(OSAbstMod.get O a); auto.
sep auto.
Qed.
Lemma qacc_absinfer_no_q :
forall P mqls x,
can_change_aop P ->
(~ exists a b wl,EcbMod.get mqls x = Some (absmsgq a b,wl)) ->
absinfer (<|| qacc (Vptr x :: nil) ||> ** HECBList mqls ** P)
(<|| END Some Vnull ||> ** HECBList mqls ** P).
Proof.
intros.
unfold qacc.
eapply absinfer_trans.
eapply absinfer_choice2 with (q:=(HECBList mqls ** P)); can_change_aop_solver.
eapply absinfer_trans.
eapply absinfer_choice1 with (q:=(HECBList mqls ** P)); can_change_aop_solver.
eapply absinfer_prim; can_change_aop_solver.
unfolds; intros.
destruct s as [[[[]]]].
eexists O, (END Some Vnull).
split.
simpl in H1; mytac.
eapply hmstep_merge_emp.
constructors.
unfolds.
exists x.
split; auto.
exists mqls.
repeat (split;auto).
Ltac join_get_solver :=
match goal with
| H: OSAbstMod.join (OSAbstMod.sig ?x ?y) _ ?O |- OSAbstMod.get ?O ?x = Some ?y =>
eapply OSAbstMod.join_get_get_l; eauto
| H: OSAbstMod.join ?O1 ?O2 ?O |- OSAbstMod.get ?O ?x = Some ?y =>
eapply OSAbstMod.join_get_get_r; [eauto | join_get_solver]
end.
join_get_solver.
apply eqdomO_same.
apply eqtid_same.
unfolds; intros; rewrite OSAbstMod.emp_sem.
destruct(OSAbstMod.get O a); auto.
sep auto.
Qed.
*)
|
{"author": "brightfu", "repo": "CertiuCOS2", "sha": "1b7e588056a23bc32a9e442a240de3002b16eefb", "save_path": "github-repos/coq/brightfu-CertiuCOS2", "path": "github-repos/coq/brightfu-CertiuCOS2/CertiuCOS2-1b7e588056a23bc32a9e442a240de3002b16eefb/coqimp/certiucos/ucos_lib/OSQAcceptPure.v"}
|
import numpy as np
import scipy.misc
import scipy.signal
import neurokit2 as nk
import os
from sklearn import preprocessing
import pandas as pd
from numpy import genfromtxt
import pandas as pd
import matplotlib.pyplot as plt
# import timesynth as ts
from neurokit2.misc import NeuroKitWarning, listify
from itertools import tee, chain, cycle, groupby
from scipy import signal
from statistics import stdev, mean
def body_len(mal_arr, interval):
    """Downsample *mal_arr* by averaging consecutive windows of *interval* samples.

    NaN entries are ignored within each window (``np.nanmean``); a trailing
    partial window is averaged as-is.  Returns a list with one mean per window.
    """
    return [np.nanmean(mal_arr[start:start + interval])
            for start in range(0, len(mal_arr), interval)]
def find_decreaingV2(ll, num_point=10):
    """Collect decreasing runs of *ll* that are at least *num_point* samples long.

    Returns the list of qualifying runs when 3 or more were found; otherwise
    falls off the end and returns None implicitly — callers must handle that.

    NOTE(review): ``peak_ind`` is never reset between iterations of the outer
    j-loop, so slots keep advancing across passes; for some input sizes it can
    exceed ``len(found)`` and raise IndexError — confirm intended input sizes.
    NOTE(review): ``found.sort(...)`` sorts a different list than the one
    returned (``found_cleaned``), so the "sorted by length" intent from the
    sibling function's comment does not take effect here — confirm.
    """
    found = [[] for i in range(len(ll) * 10)]  # create an empty list of lists (with extra free spaces just in case)
    peak_ind = 0
    for j in range(len(found)):
        for i in range(j, len(ll) - 1):
            if ll[i] > ll[i + 1]:
                found[peak_ind].append(ll[i])
            else:
                if i + 1 < len(ll) - 1:
                    found[peak_ind].append(ll[i])
                peak_ind += 1
    found_cleaned = [x for x in found if len(x) >= num_point]  # remove the sets w fewer than 6# peaks
    if len(found_cleaned) >= 3:
        found.sort(key=len, reverse=True)
        return found_cleaned
def find_increaingV2(ll, num_point=10):
    """Collect non-decreasing runs of *ll* that are at least *num_point* samples long.

    Mirror of ``find_decreaingV2`` for rising segments.  Returns the list of
    qualifying runs when 3 or more were found; otherwise returns None
    implicitly — callers must handle that.

    NOTE(review): same caveats as ``find_decreaingV2`` — ``peak_ind`` is never
    reset across outer-loop passes (possible IndexError on some input sizes),
    and ``found.sort(...)`` sorts a list that is not the one returned.
    NOTE(review): unlike the decreasing variant, the inner loop always starts
    at 0 (not at j), so every outer pass rescans the same runs — confirm
    whether that is intentional.
    """
    found = [[] for i in range(len(ll) * 10)]
    peak_ind = 0
    for j in range(len(found)):  # for every potential starting seq number
        for i in range(0, len(ll) - 1):
            if ll[i] <= ll[i + 1]:
                found[peak_ind].append(ll[i])
            else:
                if i < len(ll) - 1:
                    found[peak_ind].append(ll[i])
                peak_ind += 1
    found_cleaned = [x for x in found if len(x) >= num_point]  # remove the sets w fewer than 6# peaks
    if len(found_cleaned) >= 3:
        found.sort(key=len, reverse=True)  # return a list of lists sorted my lens of its elems in descending order
        return found_cleaned
""""
stop_time = 10
num_points = stop_time * 50
period = 2
frequency = 1/period
# Initializing TimeSampler
time_sampler = ts.TimeSampler(stop_time=stop_time)
# Sampling irregular time samples
irregular_time_samples = time_sampler.sample_irregular_time(num_points=num_points, keep_percentage=100)
sinusoid = ts.signals.PseudoPeriodic(amplitude = 1, frequency=1, ampSD = 0.1, freqSD = 0.1)
timeseries = ts.TimeSeries(sinusoid)
samples, signals, errors = timeseries.sample(irregular_time_samples)
# Plotting the series
#plt.plot(irregular_time_samples, samples, marker='o', markersize=4)
#plt.xlabel('Time')
#plt.ylabel('Magnitude')
#plt.ylim([-amplitude-10, amplitude+10])
#plt.title('Irregularly sampled sinusoid with noise');
"""
"""
check if a lag is far/different enough from other lags in list_of_lags
num = num of reference
leeway = +/- frames exclusion
list_of_lags: list of lag timestamps corresponding to timestamps of the left vallyes of
the first peak in a 3-peak sequence
"""
def check_far_enough(lag, leeway, list_of_lags):
    """Return True iff *lag* is at least *leeway* frames away from every entry.

    A candidate is rejected as soon as it falls strictly inside the open
    interval (existing - leeway, existing + leeway) of any known lag.
    """
    for existing in list_of_lags:
        if existing - leeway < lag < existing + leeway:
            # too close to a lag we already accepted
            return False
    return True
## todo: deal with negative indexes?
# goal_num == how many peak sets do we want to identify
# leeway == what's the min distance between different peak sets
def remove_overlapping(corr, goal_num_sets, signal_unpadded, leeway=5):
    """Pick up to *goal_num_sets* well-separated indices of maximal correlation.

    Scans ``corr`` (a full cross-correlation array) from the highest values
    down, in batches of ``2 * goal_num_sets``, keeping an index only if it is
    - above the hard-coded minimum of 300 frames (NOTE(review): magic number,
      presumably to skip a startup transient — confirm),
    - at least *leeway* frames from every index already kept, and
    - far enough from both edges that the template fits
      (``len(signal_unpadded) < lag < len(corr) - 2 * len(signal_unpadded)``).

    Parameters
    ----------
    corr : ndarray of correlation values (mode="full").
    goal_num_sets : int, how many distinct peak sets to identify.
    signal_unpadded : sequence, the synthetic template that was correlated.
    leeway : int, minimum separation between accepted indices.

    Returns
    -------
    list of int indices into ``corr`` (may be shorter than *goal_num_sets*
    when not enough valid candidates exist).

    Bug fix: the original looped ``while counter < goal_num_sets`` forever
    when the candidate pool ran dry (every batch failed the filters and
    ``sorted_corrs`` emptied out).  We now break once no candidates remain,
    returning the (possibly short) list instead of hanging.
    """
    k = goal_num_sets * 2
    sorted_corrs = np.argsort(corr)  # indexes of corr, smallest -> highest
    curr_top_corr_inds = list(reversed(list(sorted_corrs[-k:])))  # top k best-correlated inds
    max_corr_inds = []
    counter = 0  # how many good distinct peak sets were identified
    while counter < goal_num_sets:
        for i in curr_top_corr_inds:  # get a lag for each highest correlation
            lag = i  # not actually a lag; lag is i + len(signal_unpadded)
            if lag > 300 and check_far_enough(lag, leeway=leeway,
                                              list_of_lags=max_corr_inds):
                if lag > len(signal_unpadded) and lag < len(corr) - 2 * len(signal_unpadded):
                    max_corr_inds.append(lag)
                    counter += 1
        sorted_corrs = sorted_corrs[:-k]  # drop the indexes we already looked through
        if len(sorted_corrs) == 0:
            # candidate pool exhausted: return what we have rather than spin forever
            break
        curr_top_corr_inds = list(reversed(list(sorted_corrs[-k:])))
    return max_corr_inds
"""
duration : float, Desired length of duration (s).
sampling_rate : int, the desired sampling rate (in Hz, i.e., samples/second).
frequency : float or list, oscillatory frequency of the signal (in Hz, i.e., oscillations per second). == 1/period
amplitude : float or list, Amplitude of the oscillations.
"""
# av_worm_len = mean(currMAL)
def cross_correlate(currMAL, fps=5, freq_elong = 0.6, freq_contr = 0.8, goal_num_sets=10, leeway=5):
    """Locate candidate scrunching episodes in a body-length trace.

    Builds a synthetic 3-peak elongation/contraction template from simulated
    sinusoids (``nk.signal_simulate``), cross-correlates it against *currMAL*
    (per-frame body length), and returns the lags of the best non-overlapping
    matches plus the template itself.

    Parameters
    ----------
    currMAL : sequence of per-frame body lengths (may contain NaN).
    fps : int, frames per second of the recording.
    freq_elong, freq_contr : float, oscillation frequencies (Hz) used for the
        elongation and contraction halves of the template.
    goal_num_sets : int, how many distinct matches to look for.
    leeway : int, minimum frame separation between reported matches.

    Returns
    -------
    (lags, template) where ``lags`` are frame offsets of the matches
    (left valley of the first peak of each 3-peak sequence) and ``template``
    is the unpadded synthetic signal.

    NOTE(review): relies on ``find_increaingV2``/``find_decreaingV2`` returning
    at least 3 runs each; if they return None this raises TypeError.
    NOTE(review): ``signal_padded`` is built but unused by the active
    correlation path (only the commented-out alternative used it).
    """
    scaling_factor = 0.8
    av_worm_len = np.nanmean(currMAL) * scaling_factor  # todo: change?
    amplitude = (av_worm_len / 2) * scaling_factor
    sampling_rate = fps * 1.6  # arbitrary
    # freq_elong = 0.6 # --> duration of elong ~ (1/0.6)/2 = 0.833 sec
    # freq_contr = 0.8 # scrunch/sec --> duration of contraction ~ (1/0.8)/2 = 0.625 sec
    duration = (freq_elong + freq_contr) * 5
    # The temporal frequency was converted into a relative speed in body lengths per second by defining
    # v_m* = v_m . |delta e_max| (where e is the max in-cycle length change, normalize by max gliding length)
    # for D. japonica, oscillation frequency is between 0.7 and 0.75 Hz
    elong = nk.signal_simulate(duration=duration, sampling_rate=sampling_rate, frequency=[freq_elong],
                               amplitude=[amplitude])
    contr = nk.signal_simulate(duration=duration, sampling_rate=sampling_rate, frequency=[freq_contr],
                               amplitude=[amplitude])
    # rising/falling half-cycles long enough to span half a period at this fps
    elong_intervals = find_increaingV2(elong, num_point=int((1 / freq_elong) / 2 * fps))
    contr_intervals = find_decreaingV2(contr, num_point=int((1 / freq_contr) / 2 * fps))
    # stitch half-cycles into a 3-peak template: half-rise, fall, rise, fall, rise, half-fall
    signal_unpadded = []
    for i in range(1):
        midway = int(len(elong_intervals[0]) / 2)
        signal_unpadded.append(elong_intervals[0][midway:])
        signal_unpadded.append(contr_intervals[0])
        signal_unpadded.append(elong_intervals[1])
        signal_unpadded.append(contr_intervals[1])
        signal_unpadded.append(elong_intervals[2])
        midway = int(len(contr_intervals[0]) / 2)
        signal_unpadded.append(contr_intervals[2][:midway + 1])
    signal_unpadded = list(chain.from_iterable(
        signal_unpadded))  # unpack the list of list into one long list containing all y coords of the data
    # plt.plot(signal_unpadded)
    # create a padded version of the signal
    signal_padded = np.empty(len(currMAL))
    signal_padded.fill(av_worm_len)  # shift the axis up so that the signal oscillates around y=av worm length
    signal_padded[:len(signal_unpadded)] = signal_unpadded + av_worm_len
    # signal_unpadded = [x+av_worm_len*0.7 for x in signal_unpadded]
    # currMAL = list(chain.from_iterable(preprocessing.normalize([currMAL]))) #this was an attempt to
    # signal_unpadded = list(chain.from_iterable(preprocessing.normalize([signal_unpadded])))
    # signal_padded = list(chain.from_iterable(preprocessing.normalize([signal_padded])))
    corr = signal.correlate(currMAL, signal_unpadded, mode="full")
    # corr = signal.correlate(currMAL, signal_padded, mode="same")
    # print("mean of top 5 corrs", mean(sorted(corr, reverse=True)[:5]))
    lags = signal.correlation_lags(len(currMAL), len(signal_unpadded), mode="full")  # == corr[i] - len(signal_unpadded)
    max_corrs = remove_overlapping(corr, goal_num_sets=goal_num_sets, signal_unpadded=signal_unpadded, leeway=leeway)
    # timestamps of left valleys of the first peak for sets of 3 peaks
    list_of_lags_final = lags[max_corrs]
    return list_of_lags_final, signal_unpadded
def generate_overlap_plots(list_of_lags_final, currMAL, signal_unpadded, filepath, wellNum):
    """Save one overlay plot per detected lag: template vs. measured body length.

    For each lag in *list_of_lags_final*, places the synthetic template
    *signal_unpadded* (shifted up by the mean body length) at that offset over
    a flat baseline, plots it on top of *currMAL*, and writes the figure to
    ``<filepath>/results/synth_signal_tests/``.  Side effects only: writes
    PNG files and shows each figure; returns None.

    NOTE(review): assumes the output directory already exists — savefig will
    raise otherwise; confirm against the pipeline's setup step.
    """
    av_worm_len = np.nanmean(currMAL)
    for i in list_of_lags_final:
        # print("lag ", i)
        overlap = np.empty(len(currMAL))
        overlap.fill(av_worm_len)  # shift the axis up so that the signal oscillates around y=av worm length
        overlap[i:i + len(signal_unpadded)] = signal_unpadded + av_worm_len
        plt.plot(overlap)  # visualize the overlap
        plt.plot(currMAL)
        plt.title("Well " + str(wellNum))
        # zoom to the matched window plus 100 frames of context on each side
        plt.xlim([i - 100, i + len(signal_unpadded) + 100])
        outpath = os.path.expanduser(filepath + "/results/synth_signal_tests/peak overlaps well" + str(wellNum) + "ind" + str(i) + ".png")
        plt.savefig(outpath)
        plt.show()
        plt.close()
"""
wellNum = 5
filepath = "/Users/Arina/Desktop"
# filepath = "/Volumes/Collins_Lab/15"
filename = filepath + "/results/well_data/MAL_well" + str(wellNum) + ".csv"
currMAL = genfromtxt(filename, delimiter=',')
list_of_lags_final, signal_unpadded = cross_correlate(currMAL, fps=10)
generate_overlap_plots(list_of_lags_final, currMAL, signal_unpadded, filepath, wellNum)
"""
|
{"hexsha": "8296f804769685c4b1a3dd7cd47b323717c49980", "size": 9565, "ext": "py", "lang": "Python", "max_stars_repo_path": "generate_synth_signal.py", "max_stars_repo_name": "akazako1/CollinsLab_ScrunchingAnalysis", "max_stars_repo_head_hexsha": "91509671fdada9b59f0e3e027b989afc53e5d45d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-04T01:10:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-04T01:10:21.000Z", "max_issues_repo_path": "generate_synth_signal.py", "max_issues_repo_name": "akazako1/CollinsLab_ScrunchingAnalysis", "max_issues_repo_head_hexsha": "91509671fdada9b59f0e3e027b989afc53e5d45d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "generate_synth_signal.py", "max_forks_repo_name": "akazako1/CollinsLab_ScrunchingAnalysis", "max_forks_repo_head_hexsha": "91509671fdada9b59f0e3e027b989afc53e5d45d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.7008928571, "max_line_length": 138, "alphanum_fraction": 0.6785154208, "include": true, "reason": "import numpy,from numpy,import scipy,from scipy", "num_tokens": 2555}
|
# Define server logic to read selected file ----
# Shiny server: on "submit", feed either the typed text or an uploaded file
# into an external R analysis script and display its result.
# NOTE(review): all three paths below are hard-coded absolute Windows paths —
# the app only works on the machine they point to; consider making them
# relative or configurable.
server <- function(input, output,session) {
  #install.packages("readtext")
  library("readtext")
  # where the input text is written for the algorithm script to read
  create_readData_file<-"D:/RWebProject/ShinyApp-ver1/ver03/readData.txt"
  # the analysis script that is source()d (e.g. xgboost / sentiment analysis)
  algorithm_file_path<-"D:/RWebProject/ShinyApp-ver1/ver03/action.R"
  # where the algorithm script leaves its output
  create_Result_file<- "D:/RWebProject/ShinyApp-ver1/ver03/result.txt"
  #on submit button condition
  observeEvent(input$submit, {
    # if input text and no file upload then show messgae dialog
    if(input$text =="" && is.null(input$file1))
    {
      showModal
      (modalDialog(
        title = "Results",
        "Either File is not uploaded or text field is empty",
        easyClose = TRUE
      ))
      # NOTE(review): `showModal` on its own line is evaluated as a bare
      # function value; the modalDialog on the next line is never passed to
      # it, so this dialog is probably never shown — confirm and rewrite as
      # showModal(modalDialog(...)).
      #showmodel closed
    }
    #if closed
    #separate condition
    # check if text input is filled
    else if(input$text!=""){
      #x<-"D:/RWebProject/ShinyApp-ver1/ver03/readData.txt"
      #write.table(input$text, file=x,row.names=FALSE,col.names = FALSE,sep="")
      #save input-text into readData.txt
      sink(create_readData_file)
      cat(input$text)
      sink()
      #output section'
      output$contents <- renderTable({
        #run R Script this section is to call algorithm scripts like xgboost,sentimental analysis
        source(algorithm_file_path)
        #provide here R Script of algorithms here
        #after script is run it saves result into result.txt
        result_File<- create_Result_file
        resultant<-readtext(result_File)
        #this section is for shwing data from result.txt
        print(resultant$text)
      })
      #output closed
      # show dialog about final result like fake or not
      # NOTE(review): the verdict text is hard-coded to "FAKE" regardless of
      # the algorithm's actual result — confirm intended behavior.
      showModal(modalDialog(
        title = "Results",
        "The New is FAKE!",
        easyClose = TRUE
      ))
      #sho wmodal closed
    }
    #else if closed
    #check if file is uploaded
    # provided with input-text is empty
    # and submit button is clicked
    else if(!(is.null(input$file1))){
      output$contents <- renderTable({
        req(input$file1)
        tryCatch(
          {
            #copy file with same name (readData.txt)
            # current.folder<-"D:/RWebProject/ShinyApp-ver1/ver03/"
            #file.copy(input$file1$datapath,paste0(current.folder,"readData.txt"), overwrite = TRUE)
            file.copy(input$file1$datapath,create_readData_file, overwrite = TRUE)
            #run R Script this section is to call algorithm scripts like xgboost,sentimental analysis
            source(algorithm_file_path)
            #after script is run it saves result into result.txt
            result_File<- create_Result_file
            resultant<-readtext(result_File)
            #this section is for shwing data from result.txt
            print(resultant$text)
          },
          # try closed
          error = function(e) {
            # return a safeError if a parsing error occurs
            stop(safeError(e))
          }
          #error closed
        )
        #trycatch closed
      })
      #output$contents- renderTable closed
    }
    # #else if - fileInput closed
    #
  })
  #server closed
}
|
{"hexsha": "c2a3185711a673734e0211be60f371bfe51272b3", "size": 3511, "ext": "r", "lang": "R", "max_stars_repo_path": "fakeNewsFrontEnd/server.r", "max_stars_repo_name": "ShiMarinho/MachineLearning-FakeNews", "max_stars_repo_head_hexsha": "e20d983923deb1380ba231177fe8131d60dfe506", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fakeNewsFrontEnd/server.r", "max_issues_repo_name": "ShiMarinho/MachineLearning-FakeNews", "max_issues_repo_head_hexsha": "e20d983923deb1380ba231177fe8131d60dfe506", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-04-11T12:09:27.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-11T12:09:27.000Z", "max_forks_repo_path": "fakeNewsFrontEnd/server.r", "max_forks_repo_name": "ShiMarinho/MachineLearning-FakeNews", "max_forks_repo_head_hexsha": "e20d983923deb1380ba231177fe8131d60dfe506", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9181818182, "max_line_length": 104, "alphanum_fraction": 0.5545428653, "num_tokens": 767}
|
"""BERT embedding."""
import argparse
import io
import logging
import os
import numpy as np
import mxnet as mx
from mxnet.gluon.data import DataLoader
import gluonnlp
from gluonnlp.data import BERTTokenizer, BERTSentenceTransform
from gluonnlp.base import get_home_dir
try:
from data.embedding import BertEmbeddingDataset
except ImportError:
from .data.embedding import BertEmbeddingDataset
# Python 2 exposes a builtin ``unicode``; Python 3 does not, so probe for it
# and install an identity shim with a compatible call signature.
try:
    unicode  # probe for the Python 2 builtin
except NameError:
    def unicode(s, *_):
        """Python 3 stand-in: already-text input is returned unchanged."""
        return s


def to_unicode(s):
    """Decode *s* as UTF-8 text on Python 2; return it unchanged on Python 3."""
    return unicode(s, 'utf-8')
__all__ = ['BertEmbedding']
logger = logging.getLogger(__name__)
class BertEmbedding(object):
    """
    Encoding from BERT model.
    Parameters
    ----------
    ctx : Context.
        running BertEmbedding on which gpu device id.
    dtype: str
        data type to use for the model.
    model : str, default bert_12_768_12.
        pre-trained BERT model
    dataset_name : str, default book_corpus_wiki_en_uncased.
        pre-trained model dataset
    params_path: str, default None
        path to a parameters file to load instead of the pretrained model.
    max_seq_length : int, default 25
        max length of each sequence
    batch_size : int, default 256
        batch size
    root : str, default '$MXNET_HOME/models' with MXNET_HOME defaults to '~/.mxnet'
        Location for keeping the model parameters.
    """

    def __init__(self, ctx=mx.cpu(), dtype='float32', model='bert_12_768_12',
                 dataset_name='book_corpus_wiki_en_uncased', params_path=None,
                 max_seq_length=25, batch_size=256,
                 root=os.path.join(get_home_dir(), 'models')):
        self.ctx = ctx
        self.dtype = dtype
        self.max_seq_length = max_seq_length
        self.batch_size = batch_size
        self.dataset_name = dataset_name
        # Don't download the pretrained models if we have a parameter path
        self.bert, self.vocab = gluonnlp.model.get_model(model,
                                                         dataset_name=self.dataset_name,
                                                         pretrained=params_path is None,
                                                         ctx=self.ctx,
                                                         use_pooler=False,
                                                         use_decoder=False,
                                                         use_classifier=False,
                                                         root=root)
        self.bert.cast(self.dtype)
        if params_path:
            logger.info('Loading params from %s', params_path)
            self.bert.load_parameters(params_path, ctx=ctx, ignore_extra=True)
        # lowercase tokenization iff the pretrained dataset is an uncased one
        lower = 'uncased' in self.dataset_name
        self.tokenizer = BERTTokenizer(self.vocab, lower=lower)
        self.transform = BERTSentenceTransform(tokenizer=self.tokenizer,
                                               max_seq_length=self.max_seq_length,
                                               pair=False)

    def __call__(self, sentences, oov_way='avg'):
        """Alias for :meth:`embedding`.

        Bug fix: the original hard-coded ``oov_way='avg'`` in the forwarded
        call, silently ignoring the caller's argument; it is now passed
        through.
        """
        return self.embedding(sentences, oov_way=oov_way)

    def embedding(self, sentences, oov_way='avg'):
        """
        Get tokens, tokens embedding
        Parameters
        ----------
        sentences : List[str]
            sentences for encoding.
        oov_way : str, default avg.
            use **avg**, **sum** or **last** to get token embedding for those out of
            vocabulary words
        Returns
        -------
        List[(List[str], List[ndarray])]
            List of tokens, and tokens embedding
        """
        data_iter = self.data_loader(sentences=sentences)
        batches = []
        for token_ids, valid_length, token_types in data_iter:
            # move the batch to the model's device before the forward pass
            token_ids = token_ids.as_in_context(self.ctx)
            valid_length = valid_length.as_in_context(self.ctx)
            token_types = token_types.as_in_context(self.ctx)
            sequence_outputs = self.bert(token_ids, token_types,
                                         valid_length.astype(self.dtype))
            for token_id, sequence_output in zip(token_ids.asnumpy(),
                                                 sequence_outputs.asnumpy()):
                batches.append((token_id, sequence_output))
        return self.oov(batches, oov_way)

    def data_loader(self, sentences, shuffle=False):
        """Load, tokenize and prepare the input sentences."""
        dataset = BertEmbeddingDataset(sentences, self.transform)
        return DataLoader(dataset=dataset, batch_size=self.batch_size, shuffle=shuffle)

    def oov(self, batches, oov_way='avg'):
        """
        How to handle oov. Also filter out [CLS], [SEP] tokens.
        Parameters
        ----------
        batches : List[(tokens_id,
                        sequence_outputs,
                        pooled_output].
            batch   token_ids (max_seq_length, ),
                    sequence_outputs (max_seq_length, dim, ),
                    pooled_output (dim, )
        oov_way : str
            use **avg**, **sum** or **last** to get token embedding for those out of
            vocabulary words
        Returns
        -------
        List[(List[str], List[ndarray])]
            List of tokens, and tokens embedding
        """
        sentences = []
        for token_ids, sequence_outputs in batches:
            tokens = []
            tensors = []
            oov_len = 1
            for token_id, sequence_output in zip(token_ids, sequence_outputs):
                # NOTE(review): ids 1/2/3 are assumed to be [PAD]/[CLS]/[SEP]
                # in this vocab — confirm if a different vocab is ever used.
                if token_id == 1:
                    # [PAD] token, sequence is finished.
                    break
                if token_id in (2, 3):
                    # [CLS], [SEP]
                    continue
                token = self.vocab.idx_to_token[token_id]
                if token.startswith('##'):
                    # WordPiece continuation: merge into the previous token and
                    # fold this piece's vector per the chosen oov strategy
                    token = token[2:]
                    tokens[-1] += token
                    if oov_way == 'last':
                        tensors[-1] = sequence_output
                    else:
                        tensors[-1] += sequence_output
                    if oov_way == 'avg':
                        oov_len += 1
                else:  # iv, avg last oov
                    if oov_len > 1:
                        tensors[-1] /= oov_len
                        oov_len = 1
                    tokens.append(token)
                    tensors.append(sequence_output)
            if oov_len > 1:  # if the whole sentence is one oov, handle this special case
                tensors[-1] /= oov_len
            sentences.append((tokens, tensors))
        return sentences
if __name__ == '__main__':
    # CLI driver: embed sentences given on the command line or read from a
    # file, then print each sentence with its token embeddings.
    np.set_printoptions(threshold=5)
    parser = argparse.ArgumentParser(description='Get embeddings from BERT',
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--gpu', type=int, default=None,
                        help='id of the gpu to use. Set it to empty means to use cpu.')
    parser.add_argument('--dtype', type=str, default='float32', help='data dtype')
    parser.add_argument('--model', type=str, default='bert_12_768_12',
                        help='pre-trained model')
    parser.add_argument('--dataset_name', type=str, default='book_corpus_wiki_en_uncased',
                        help='dataset')
    parser.add_argument('--params_path', type=str, default=None,
                        help='path to a params file to load instead of the pretrained model.')
    parser.add_argument('--max_seq_length', type=int, default=25,
                        help='max length of each sequence')
    parser.add_argument('--batch_size', type=int, default=256,
                        help='batch size')
    parser.add_argument('--oov_way', type=str, default='avg',
                        help='how to handle oov\n'
                             'avg: average all oov embeddings to represent the original token\n'
                             'sum: sum all oov embeddings to represent the original token\n'
                             'last: use last oov embeddings to represent the original token\n')
    parser.add_argument('--sentences', type=to_unicode, nargs='+', default=None,
                        help='sentence for encoding')
    parser.add_argument('--file', type=str, default=None,
                        help='file for encoding')
    parser.add_argument('--verbose', action='store_true', help='verbose logging')
    args = parser.parse_args()
    level = logging.DEBUG if args.verbose else logging.INFO
    logging.getLogger().setLevel(level)
    logging.info(args)
    # choose the compute device from --gpu (None -> CPU)
    if args.gpu is not None:
        context = mx.gpu(args.gpu)
    else:
        context = mx.cpu()
    # NOTE(review): args.dtype and args.params_path are parsed but not
    # forwarded to BertEmbedding here — confirm whether that is intentional.
    bert_embedding = BertEmbedding(ctx=context, model=args.model, dataset_name=args.dataset_name,
                                   max_seq_length=args.max_seq_length, batch_size=args.batch_size)
    result = []
    sents = []
    if args.sentences:
        sents = args.sentences
        result = bert_embedding(sents, oov_way=args.oov_way)
    elif args.file:
        # one sentence per line
        with io.open(args.file, 'r', encoding='utf8') as in_file:
            for line in in_file:
                sents.append(line.strip())
        result = bert_embedding(sents, oov_way=args.oov_way)
    else:
        logger.error('Please specify --sentence or --file')
    if result:
        for sent, embeddings in zip(sents, result):
            print('Text: {}'.format(sent))
            _, tokens_embedding = embeddings
            print('Tokens embedding: {}'.format(tokens_embedding))
|
{"hexsha": "728b885e51c7e363e4a86a6d138390dd19416a5a", "size": 9631, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/bert/embedding.py", "max_stars_repo_name": "faramarzmunshi/gluon-nlp", "max_stars_repo_head_hexsha": "218661c71b62b025d636975d2e71a0a4c2ea9f76", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 62, "max_stars_repo_stars_event_min_datetime": "2019-04-06T14:06:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T02:56:42.000Z", "max_issues_repo_path": "scripts/bert/embedding.py", "max_issues_repo_name": "faramarzmunshi/gluon-nlp", "max_issues_repo_head_hexsha": "218661c71b62b025d636975d2e71a0a4c2ea9f76", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-08-01T21:40:43.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-01T21:40:43.000Z", "max_forks_repo_path": "scripts/bert/embedding.py", "max_forks_repo_name": "faramarzmunshi/gluon-nlp", "max_forks_repo_head_hexsha": "218661c71b62b025d636975d2e71a0a4c2ea9f76", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 31, "max_forks_repo_forks_event_min_datetime": "2019-04-05T18:24:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-30T09:04:19.000Z", "avg_line_length": 39.633744856, "max_line_length": 98, "alphanum_fraction": 0.5596511266, "include": true, "reason": "import numpy", "num_tokens": 1928}
|
"""
Title: Random forest digital twin template
Authors: Blagoj Delipetrev, Mattia Santoro, Nicholas Spadaro
Date created: 2020/11/09
Last modified: 2020/11/09
Description: Templates for creation and execution through the VLAB on DestinationEarth VirtualCloud of random forest based digital twins.
Version: 0.1
"""
import json, csv
import os, sys, getopt, math
import numpy as np
import math, copy, time
from pathlib import Path
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.model_selection import train_test_split
from joblib import dump, load
from prepare_image import loadTrainData, loadClassifyData
from utils import blockshaped, unblockshaped, Setcmap
from generate_raster import numpy_array_to_raster, NO_DATA, GDAL_DATA_TYPE, SPATIAL_REFERENCE_SYSTEM_WKID, GEOTIFF_DRIVER_NAME, do_parse
from prepare_sentinel_product import subset_and_resample, do_transform
from soil_moisture_ftp_client import download_to
from prepare_edge_data import invoke_prepare
# csv edgestream
# west,south,east,north,YYYY,MM,DD,value
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
def searchBestParamsGridCV(rf, base_model, train_features, train_labels,test_features, test_labels ):
    """Grid-search hyper-parameters for *rf* and compare against *base_model*.

    Fits a 3-fold GridSearchCV over a fixed random-forest parameter grid,
    then fits *base_model* as a baseline and prints the accuracy improvement.

    NOTE(review): ``pprint`` and ``evaluate`` are not defined or imported in
    this file — as written, both calls raise NameError at runtime.  They are
    presumably provided elsewhere in the project (or ``pprint`` was meant to
    be ``from pprint import pprint``); confirm before relying on this path.
    """
    param_grid = {
        'bootstrap': [True],
        'max_depth': [1, 4, 8, 10],
        'max_features': [2, 3],
        'min_samples_leaf': [3, 4, 5],
        'min_samples_split': [8, 10, 12],
        'n_estimators': [20, 40, 50, 100]
    }
    # rf = RandomForestRegressor()
    grid_search = GridSearchCV(estimator = rf, param_grid = param_grid,
                               cv = 3, n_jobs = -1, verbose = 2)
    grid_search.fit(train_features, train_labels)
    pprint(grid_search.best_params_)
    # base_model = RandomForestRegressor(n_estimators = 10, random_state = 42)
    base_model.fit(train_features, train_labels)
    base_accuracy = evaluate(base_model, test_features, test_labels)
    best_grid = grid_search.best_estimator_
    grid_accuracy = evaluate(best_grid, test_features, test_labels)
    print("grid_accuracy: ",grid_accuracy)
    print('Improvement of {:0.2f}%.'.format( 100 * (grid_accuracy - base_accuracy) / base_accuracy))
def getVlabParams():
    """Load the VLab run parameters from ``vlabparams.json`` in the cwd.

    The file is expected to hold keys such as ``bbox`` (a
    "west,south,east,north" string), ``features`` and ``targets``.
    """
    with open('vlabparams.json') as fh:
        return json.load(fh)
def trainClass():
    """Train a RandomForestClassifier on the prepared rasters and write a prediction GeoTIFF.

    Reads run parameters from ``vlabparams.json``, loads training features and
    targets from ``data/inputs/``, bins the continuous targets into classes,
    trains and evaluates the classifier, dumps the model to
    ``data/outputs/model.joblib`` and writes the classified raster to
    ``data/outputs/prediction.tif``.  Side effects only; returns None.
    """
    vlabparams=getVlabParams()
    bbox = vlabparams['bbox'].split(',')
    features = vlabparams['features'].split(',')
    targets = vlabparams['targets'].split(',')
    X, Y, origshape = loadTrainData('data/inputs/', features = features, targets=targets)
    # move the band axis last, then flatten the spatial blocks to rows
    X = np.rollaxis(X, 0, 4)
    X = X.reshape(X.shape[0]*X.shape[1]*X.shape[2],X.shape[3])
    Y = Y.flatten()
    # bin continuous targets into 6 classes (0..5) at these thresholds
    Y = np.digitize(Y,bins=[0.1, 0.2, 0.3, 0.4, 0.5]) #, 0.6, 0.7, 0.8, 0.9, 1.0])
    print("X.shape: ", X.shape)
    print("Y.shape: ", Y.shape)
    # replace NaNs so the forest can fit
    X[np.isnan(X)]=0
    Y[np.isnan(Y)]=0
    print("Train/Test split")
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=0)
    del X,Y
    rf = RandomForestClassifier(n_estimators=50,
                                oob_score=True,
                                max_depth=4,
                                n_jobs=-1,
                                verbose=2,
                                bootstrap=True)
    print("Train")
    rf.fit(X_train, y_train)
    print("train score: ", rf.score(X_train,y_train))
    print("Dump")
    dump(rf, 'data/outputs/model.joblib')
    print("Test")
    print(rf.score(X_test,y_test))
    del X_train, X_test, y_train, y_test
    # predict over the first satellite product found and rebuild the raster
    flds = list(os.walk('data/satproduct/'))[0][1]
    X,origshape = loadClassifyData('data/inputs/{}'.format(flds[0]), features = features, modifiers={})
    X = np.rollaxis(X, 0, 4)
    X = X.reshape(X.shape[0]*X.shape[1]*X.shape[2],X.shape[3])
    Y = (rf.predict(X))
    Y=Y.reshape(-1,32, 32)
    Y=unblockshaped(Y,int(origshape[0]), int(origshape[1]))
    # upper-left corner is (west, north); pixel size is hard-coded (degrees)
    numpy_array_to_raster('data/outputs/prediction.tif', Y, (float(bbox[0]), float(bbox[3])),
                          0.000091903317710, 1, NO_DATA,
                          GDAL_DATA_TYPE, SPATIAL_REFERENCE_SYSTEM_WKID, GEOTIFF_DRIVER_NAME)
def trainRegr():
    """Train a RandomForestRegressor on the prepared rasters and write a prediction GeoTIFF.

    Same pipeline as ``trainClass`` but with a regressor: loads data via
    ``getTestTrainData``, fits, dumps the model to
    ``data/outputs/model.joblib`` and writes ``data/outputs/prediction.tif``.
    Side effects only; returns None.

    NOTE(review): ``getTestTrainData`` digitizes the targets into class
    indices, so this "regressor" actually regresses on bin labels — confirm
    that is intended.
    """
    vlabparams=getVlabParams()
    bbox = vlabparams['bbox'].split(',')
    features = vlabparams['features'].split(',')
    targets = vlabparams['targets'].split(',')
    X_train, X_test, y_train, y_test = getTestTrainData(features, targets)
    regr = RandomForestRegressor(max_depth=100, min_samples_leaf=3,min_samples_split=8, bootstrap=True, random_state=0, n_estimators = 100, verbose=2, n_jobs=-1, warm_start=True)
    print("Train")
    regr.fit(X_train, y_train)
    print("train score: ", regr.score(X_train,y_train))
    print("Dump")
    dump(regr, 'data/outputs/model.joblib')
    print("Test")
    print(regr.score(X_test,y_test))
    del X_train, X_test, y_train, y_test
    # predict over the first satellite product found and rebuild the raster
    flds = list(os.walk('data/satproduct/'))[0][1]
    X,origshape = loadClassifyData('data/inputs/{}'.format(flds[0]), features = features, modifiers={})
    X = np.rollaxis(X, 0, 4)
    X = X.reshape(X.shape[0]*X.shape[1]*X.shape[2],X.shape[3])
    Y = (regr.predict(X))
    Y=Y.reshape(-1,32, 32)
    Y=unblockshaped(Y,int(origshape[0]), int(origshape[1]))
    numpy_array_to_raster('data/outputs/prediction.tif', Y, (float(bbox[0]), float(bbox[3])),
                          0.000091903317710, 1, NO_DATA,
                          GDAL_DATA_TYPE, SPATIAL_REFERENCE_SYSTEM_WKID, GEOTIFF_DRIVER_NAME)
def run():
    """Apply a previously-trained model (``data/model.joblib``) to new data.

    Loads the first satellite product's prepared features, predicts with the
    saved model, and writes ``data/outputs/prediction.tif``.  Side effects
    only; returns None.

    NOTE(review): loads the model from ``data/model.joblib`` while the train
    functions save to ``data/outputs/model.joblib`` — confirm the deployment
    step moves the file, or the paths are inconsistent.
    """
    vlabparams=getVlabParams()
    bbox = vlabparams['bbox'].split(',')
    features = vlabparams['features'].split(',')
    modifiers = json.loads(vlabparams['modifiers'])
    flds = list(os.walk('data/satproduct/'))[0][1]
    X, origshape = loadClassifyData('data/inputs/{}'.format(flds[0]), features = features, modifiers=modifiers)
    # band axis last, then flatten spatial blocks to rows for prediction
    X = np.rollaxis(X, 0, 4)
    X=X.reshape(X.shape[0]*X.shape[1]*X.shape[2],X.shape[3])
    regr = load('data/model.joblib')
    Y = (regr.predict(X))
    Y=Y.reshape(-1,32, 32)
    Y=unblockshaped(Y,int(origshape[0]), int(origshape[1]))
    # Y = np.flip(Y, 0)
    numpy_array_to_raster('data/outputs/prediction.tif', Y, (float(bbox[0]), float(bbox[3])),
                          0.000091903317710, 1, NO_DATA,
                          GDAL_DATA_TYPE, SPATIAL_REFERENCE_SYSTEM_WKID, GEOTIFF_DRIVER_NAME)
def getTestTrainData(features, targets):
    """Load training rasters and return a train/test split.

    Loads features/targets from ``data/inputs/``, flattens the spatial blocks
    into rows, bins the targets into class indices (0..5), zeroes NaNs, and
    returns ``(X_train, X_test, y_train, y_test)`` with a fixed 33% test
    split (random_state=0, so the split is reproducible).

    NOTE(review): the digitize step makes every caller — including the
    regression path — train on bin labels rather than raw values; confirm.
    """
    X, Y, origshape = loadTrainData('data/inputs/', features = features, targets=targets)
    # band axis last, then flatten the spatial blocks to feature rows
    X = np.rollaxis(X, 0, 4)
    X = X.reshape(X.shape[0]*X.shape[1]*X.shape[2],X.shape[3])
    Y = Y.flatten()
    Y = np.digitize(Y,bins=[0.1, 0.2, 0.3, 0.4, 0.5]) #, 0.6, 0.7, 0.8, 0.9, 1.0])
    print("X.shape: ", X.shape)
    print("Y.shape: ", Y.shape)
    X[np.isnan(X)]=0
    Y[np.isnan(Y)]=0
    print("Train/Test split")
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=0)
    del X,Y
    return X_train, X_test, y_train, y_test
def optimizeRegr():
    """Grid-search RandomForestRegressor hyper-parameters on the VLab data.

    Loads parameters and data the same way as ``trainRegr`` and delegates the
    search (plus baseline comparison) to ``searchBestParamsGridCV``.
    """
    params = getVlabParams()
    bbox = params['bbox'].split(',')  # parsed for parity with the other entry points; unused here
    feature_cols = params['features'].split(',')
    target_cols = params['targets'].split(',')
    search_estimator = RandomForestRegressor()
    baseline = RandomForestRegressor(n_estimators = 10, random_state = 42)
    X_train, X_test, y_train, y_test = getTestTrainData(feature_cols, target_cols)
    searchBestParamsGridCV(search_estimator, baseline, X_train, y_train, X_test, y_test)
def optimizeClassifier():
    """Grid-search RandomForestClassifier hyper-parameters on the VLab data.

    Classifier counterpart of ``optimizeRegr``: loads the same data and
    delegates the search (plus baseline comparison) to
    ``searchBestParamsGridCV``.
    """
    params = getVlabParams()
    bbox = params['bbox'].split(',')  # parsed for parity with the other entry points; unused here
    feature_cols = params['features'].split(',')
    target_cols = params['targets'].split(',')
    search_estimator = RandomForestClassifier()
    baseline = RandomForestClassifier(n_estimators = 10, random_state = 42)
    X_train, X_test, y_train, y_test = getTestTrainData(feature_cols, target_cols)
    searchBestParamsGridCV(search_estimator, baseline, X_train, y_train, X_test, y_test)
def main(argv):
    """Entry point: prepare per-product inputs, then dispatch on the mode flag.

    Parameters
    ----------
    argv : list of command-line arguments (``sys.argv[1:]``); ``argv[0]`` is
        the integer mode: 0=run saved model, 1/2=train regressor,
        3=optimize regressor, 4=optimize classifier.

    For every satellite product folder the function subsets/resamples the
    bands into ``data/inputs/<fld>/`` and, when SOIL data is requested,
    downloads and prepares the edge (soil-moisture) data as well.
    """
    #data/satproduct/
    vlabparams=getVlabParams()
    bbox = vlabparams['bbox'].split(',')
    Path("data/inputs/").mkdir(parents=True, exist_ok=True)
    flds = list(os.walk('data/satproduct/'))[0][1]
    # Bug fix: read the mode from the argv parameter (sys.argv[1:] as passed
    # by the caller) instead of reaching back into sys.argv — identical for
    # the standard invocation, but now main() is callable programmatically.
    mode = int(argv[0])
    shape = None
    for fld in flds:
        Path('data/inputs/{}/'.format(fld)).mkdir(parents=True, exist_ok=True)
        Path('data/edge/{}/'.format(fld)).mkdir(parents=True, exist_ok=True)
        shape = subset_and_resample("data/satproduct/{}/".format(fld), 'data/inputs/{}/'.format(fld), bbox )
        _, y, m, d = do_parse(fld)
        if 'SOIL' in vlabparams['features'].split(',') or 'SOIL' in vlabparams['targets'].split(','):
            download_to('data/edge/{}/'.format(fld),'data/inputs/{}/'.format(fld),y,m,d, bbox)
            invoke_prepare('data/edge/','data/inputs/{}/'.format(fld),y,m,d, bbox, shape)
    if mode==0:
        run()
    elif mode==1:
        trainRegr()
    elif mode==3:
        optimizeRegr()
    elif mode==4:
        optimizeClassifier()
    elif mode==2:
        # NOTE(review): mode 2 duplicates mode 1; trainClass() is defined but
        # never dispatched — possibly mode 2 was meant to call it. Confirm
        # before changing, as callers may rely on the current behavior.
        trainRegr()
|
{"hexsha": "fd4a7c53c9a875541d694c347b0957ad8b539412", "size": 9301, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "snicholas/RandomForestDT", "max_stars_repo_head_hexsha": "f8fd1d181e27592f5b4d15c9906d250081b1fd02", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "snicholas/RandomForestDT", "max_issues_repo_head_hexsha": "f8fd1d181e27592f5b4d15c9906d250081b1fd02", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "snicholas/RandomForestDT", "max_forks_repo_head_hexsha": "f8fd1d181e27592f5b4d15c9906d250081b1fd02", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.7938596491, "max_line_length": 180, "alphanum_fraction": 0.6325126331, "include": true, "reason": "import numpy", "num_tokens": 2632}
|
import math
from typing import Optional, Union, List, Type
import numpy as np
import torch
import torch.multiprocessing
from falkon.sparse.sparse_tensor import SparseTensor
__all__ = (
"select_dim_over_nm",
"select_dim_over_nd",
"select_dim_over_nm_v2",
"calc_gpu_block_sizes",
"choose_fn",
"sizeof_dtype",
"check_sparse",
"check_same_dtype",
"check_same_device",
)
def solve_quad(a, b, c):
    """Return the larger root of ``a*x**2 + b*x + c = 0``.

    Degenerates to ``inf`` when ``a == 0`` (the constraint is not quadratic),
    which callers treat as "no quadratic bound".  Assumes a real root exists
    (``b**2 >= 4*a*c``); otherwise ``math.sqrt`` raises ValueError.
    """
    if a == 0:
        return float('inf')
    discriminant = b ** 2 - 4 * a * c
    return (math.sqrt(discriminant) - b) / (2 * a)
def solve_lin(b, c):
    """Return the root of ``b*x + c = 0`` (raises ZeroDivisionError if b == 0)."""
    return (-c) / b
def select_dim_over_nm(max_n, max_m, d, coef_nd, coef_md, coef_nm, coef_n, coef_m, rest, max_mem):
"""Finds the optimal values for `n` and `m` to fit in available memory.
This function should be called for problems where the GPU needs to hold
two blocks of data (one of size m, one of size n) and one kernel block
(of size n x m).
Parameters
-----------
max_n : int
The maximum value for n (the first dimension of the problem)
max_m : int
The maximum value for m (the second dimension of the problem)
d : int
The dimensionality of the data
coef_nd : float
How many n*d blocks need to be held in memory
coef_md : float
How many m*d blocks need to be held in memory
coef_nm : float
How many m*n blocks need to be held in memory
coef_n : float
How many n-dimensional vectors need to be held in memory
coef_m : float
How many m-dimensional vectors need to be held in memory
rest : float
additional bytes to be kept in memory
max_mem : float
The amount of available memory in bytes. This is the main problem constraint
Returns
-------
out_n : int
The dimension n to use in order to fit in available memory
out_m : int
The dimension m to use in order to fit in available memory
Notes
------
The equation gives a hyperbola. We intersect the hyperbola
with a line from the origin, with the slope given by the ratio
of max_m and max_n. We then solve a quadratic equation to find
the intersection point.
"""
fac = max_m / max_n
if coef_nm == 0 and (coef_nd == 0 and coef_md == 0 and coef_n == 0 and coef_m == 0):
v_n = max_n
elif coef_nm == 0:
v_n = solve_lin(b=d * (coef_nd + fac * coef_md) + coef_n + coef_m * fac,
c=rest - max_mem)
else:
v_n = solve_quad(a=fac * coef_nm,
b=d * (fac * coef_md + coef_nd) + fac * coef_m + coef_n,
c=rest - max_mem)
v_m = fac * v_n
out_n = int(min(v_n, max_n))
out_m = int(min(v_m, max_m))
if out_n <= 0 or out_m <= 0:
raise MemoryError("Available memory %.2fMB is not enough." % (max_mem / 2**20))
return out_n, out_m
def select_dim_over_nd(max_n, max_d, coef_nd, coef_n, coef_d, rest, max_mem):
"""
solves the problem, max n*d such that n <= maxN, d <= maxD and
coef_nd*nd + coef_n*n + coef_d*d + rest <= tot
"""
if coef_nd == 0 and (coef_n == 0 or coef_d == 0): # One or 0 variables interesting
if coef_d == coef_n:
n, d = max_n, max_d
elif coef_n == 0:
n = max_n
d = (max_mem - rest) / coef_d
else: # coef_d == 0
n = (max_mem - rest) / coef_n
d = max_d
else: # Both variables are used. We solve assuming n == d
if coef_nd == 0:
x = solve_lin(b=coef_n + coef_d, c=rest - max_mem)
else:
try:
x = solve_quad(a=coef_nd, b=coef_n + coef_d, c=rest - max_mem)
except ValueError: # Does not intersect x-axis.
x = -1
n = math.floor(min(max_n, x))
d = math.floor(min(max_d, x))
# If one of n, d reaches the max, try use up the remaining memory on the other one.
if d == max_d and n < max_n:
# Assume d fixed at maxD, and derive for the best value of n
n = (max_mem - rest - coef_d * d) / (coef_nd * d + coef_n)
elif d < max_d and n == max_n:
# Assume n fixed at maxN, and derive for the best value of d
d = (max_mem - rest - coef_n * n) / (coef_nd * n + coef_d)
n = int(min(max_n, n))
d = int(min(max_d, d))
if n <= 0 or d <= 0:
raise MemoryError("Available memory %.2fMB is not enough." % (max_mem / 2 ** 20))
return n, d
def select_dim_over_nm_v2(max_n, max_m, coef_nm, coef_n, coef_m, rest, max_mem):
"""
solves the problem, max n*m such that n <= maxN, m <= maxM and
coef_nm*nm + coef_n*n + coef_m*m <= tot
"""
return select_dim_over_nd(max_n=max_n, max_d=max_m, coef_nd=coef_nm, coef_n=coef_n, coef_d=coef_m,
rest=rest, max_mem=max_mem)
def calc_gpu_block_sizes(device_info, tot_size):
gpu_speed = np.array([g.speed for g in device_info])
speed_frac = np.array(gpu_speed) / np.sum(gpu_speed)
block_sizes = np.cumsum(np.concatenate(([0], speed_frac))) * tot_size
block_sizes[0] = 0
block_sizes[-1] = tot_size
return np.floor(block_sizes).astype(np.int64).tolist()
def choose_fn(dtype, f64_fn, f32_fn, fn_name):
# Necessary to check torch early because comparing
# torch.dtype == numpy.dtype results in a type-error.
if isinstance(dtype, torch.dtype):
if dtype == torch.float64:
return f64_fn
if dtype == torch.float32:
return f32_fn
if dtype == np.float64:
return f64_fn
if dtype == np.float32:
return f32_fn
raise TypeError("No %s function exists for data type %s." % (fn_name, dtype))
def sizeof_dtype(dtype: Union[torch.dtype, np.dtype, Type]) -> int:
# Necessary to check torch early because comparing
# torch.dtype == numpy.dtype results in a type-error.
if isinstance(dtype, torch.dtype):
if dtype == torch.float64:
return 8
if dtype == torch.float32:
return 4
if dtype == np.float64:
return 8
if dtype == np.float32:
return 4
raise TypeError("Dtype %s not valid" % (dtype))
def check_sparse(*args: Union[torch.Tensor, SparseTensor]) -> List[bool]:
out = []
for t in args:
out.append(isinstance(t, SparseTensor))
return out
def check_same_dtype(*args: Optional[Union[torch.Tensor, SparseTensor]]) -> bool:
dt = None
all_equal = True
for a in args:
if a is None:
continue
if dt is None:
dt = a.dtype
else:
all_equal &= a.dtype == dt
return all_equal
def check_same_device(*args: Union[None, torch.Tensor, SparseTensor]) -> bool:
dev = None
for t in args:
if t is None:
continue
t_dev = t.device
if dev is None:
dev = t_dev
elif t_dev != dev:
return False
return True
|
{"hexsha": "dc6721abfb41ecc54bdd719b71ebb745bd1f4d36", "size": 6964, "ext": "py", "lang": "Python", "max_stars_repo_path": "falkon/utils/helpers.py", "max_stars_repo_name": "Akatsuki96/falkon", "max_stars_repo_head_hexsha": "380ffc2fdd831955255f5ead297f0e729b1af147", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-10T16:50:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-10T16:50:53.000Z", "max_issues_repo_path": "falkon/utils/helpers.py", "max_issues_repo_name": "Akatsuki96/falkon", "max_issues_repo_head_hexsha": "380ffc2fdd831955255f5ead297f0e729b1af147", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "falkon/utils/helpers.py", "max_forks_repo_name": "Akatsuki96/falkon", "max_forks_repo_head_hexsha": "380ffc2fdd831955255f5ead297f0e729b1af147", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5113122172, "max_line_length": 102, "alphanum_fraction": 0.5964962665, "include": true, "reason": "import numpy", "num_tokens": 1901}
|
## --- test Interpolations.jl
# Interpolation: query points inside the sampled range
@test linterp1(1:10, 1:10, 5.5) == 5.5
@test linterp1(1:10, collect(1:10.), 3:7) == 3:7
@test linterp1(1:10,21:30,5:0.5:6) == [25.0, 25.5, 26.0]
# linterp1s accepts x-values given in descending (unsorted) order
@test linterp1s(10:-1:1,21:30,5:0.5:6) == [26.0, 25.5, 25.0]
@test linterp_at_index(1:100,10) == 10
# Extrapolation: query points outside the sampled range
@test linterp1(1:10, 1:10, 15) == 15 # Default is to extrapolate
@test linterp1(1:10, 1:10, 15, extrapolate=-5) == -5
@test linterp1(1:10, 1:10, 5, extrapolate=-5) == 5 # in-range points are unaffected by `extrapolate`
@test isnan(linterp1(1:10, 1:10, 15, extrapolate=NaN))
@test linterp1(1:10,1:10,0:11) == 0:11 # Default is to extrapolate
@test linterp1(1:10,1:10,0:11, extrapolate=:Linear) == 0:11
@test linterp1(1:10,1:10,0.5:10.5, extrapolate=:Linear) == 0.5:10.5
# A numeric or NaN `extrapolate` replaces only the out-of-range results
@test linterp1(1:10,1:10,0.5:10.5, extrapolate=-5) == [-5; 1.5:9.5; -5]
@test all(linterp1(1:10,1:10,0.5:10.5, extrapolate=NaN) .=== [NaN; 1.5:9.5; NaN])
# Index queries outside the array bounds
@test isnan(linterp_at_index(1:100,-10))
@test linterp_at_index(1:100,-10, 0) == 0 # explicit default value
|
{"hexsha": "91d94bc17301020a9968e8b82360017b54ba7f1c", "size": 1037, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/testInterpolations.jl", "max_stars_repo_name": "brenhinkeller/StatGeochemBase", "max_stars_repo_head_hexsha": "ead6e2c124191b43de7a3aecd49479bdd6512599", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-30T22:39:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-30T22:39:11.000Z", "max_issues_repo_path": "test/testInterpolations.jl", "max_issues_repo_name": "brenhinkeller/StatGeochemBase", "max_issues_repo_head_hexsha": "ead6e2c124191b43de7a3aecd49479bdd6512599", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-06-02T23:41:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-01T06:56:33.000Z", "max_forks_repo_path": "test/testInterpolations.jl", "max_forks_repo_name": "brenhinkeller/StatGeochemBase", "max_forks_repo_head_hexsha": "ead6e2c124191b43de7a3aecd49479bdd6512599", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.1363636364, "max_line_length": 85, "alphanum_fraction": 0.5988428158, "num_tokens": 510}
|
SST_PATH = "/Users/ccolley/Documents/Research/SparseSymmetricTensors.jl/src/SparseSymmetricTensors.jl" # local path
#SST_PATH = "/homes/ccolley/Documents/Software/SparseSymmetricTensors.jl/src/SparseSymmetricTensors.jl" #Nilpotent path
include(SST_PATH)
using Main.SparseSymmetricTensors
#===============================================================================
Contraction Codes
===============================================================================#
#TODO: adapt to work for any order tensor
"""
    implicit_contraction(A, B, x)

Contract the vector `x` (of length `A.cubical_dimension * B.cubical_dimension`)
against the Kronecker-structured pairing of the two third-order COO tensors
`A` and `B`, without ever materializing the product tensor.  Returns a new
vector `y` of the same length as `x`.
"""
function implicit_contraction(A::COOTen,B::COOTen,x::Array{Float64,1})
    @assert length(x) == A.cubical_dimension*B.cubical_dimension
    m = A.cubical_dimension
    n = B.cubical_dimension
    y = similar(x)
    y .= 0
    # ileave maps an index pair (i,j) to its position in the interleaved vector
    ileave = (i,j) -> i + m*(j-1)
    for i in 1:length(A)
        # enumerate every ordering of A's index triple explicitly
        for (i_1,i_2,i_3) in permutations(A.indices[i,:])
            for j in 1:length(B)
                j_1,j_2,j_3 = B.indices[j,:]
                # NOTE(review): the factor 2 presumably compensates for B's
                # symmetric entries being stored once — confirm against the
                # COOTen storage convention.
                y[ileave(i_1,j_1)] += 2*A.vals[i]*B.vals[j]*x[ileave(i_2,j_2)]*x[ileave(i_3,j_3)]
                y[ileave(i_2,j_2)] += 2*A.vals[i]*B.vals[j]*x[ileave(i_1,j_1)]*x[ileave(i_3,j_3)]
                y[ileave(i_3,j_3)] += 2*A.vals[i]*B.vals[j]*x[ileave(i_1,j_1)]*x[ileave(i_2,j_2)]
            end
        end
    end
    return y
end
#===============================================================================
Matching Code
===============================================================================#
#Computes the TAME score for this iterate by
# Score the alignment induced by the rank-1 iterate pair (u, v): round the
# similarity scores into a one-to-one matching, then count aligned triangles.
function TAME_score(A::COOTen,B::COOTen,u::Array{Float64,1},v::Array{Float64,1})
    mapping, _ = rank_one_matching(u,v)
    return TAME_score(A,B,mapping)
end
# Score the alignment induced by the low-rank factor pair (U, V): round the
# factors into a matching, then count the triangles it aligns.
function TAME_score(A::COOTen,B::COOTen,U::Array{Float64,2},V::Array{Float64,2})
    mapping = low_rank_matching(U,V)
    return TAME_score(A,B,mapping)
end
"""
Score a dense alignment matrix `X`: round it to a matching with the
bipartite matching solver, then count the triangles aligned under that
matching.  With `return_timings=true`, the wall-clock time of the
matching (rounding) step and of the scoring step are returned as well,
in that order — matching the vectorized variant of this function.
"""
function TAME_score(A::COOTen,B::COOTen,X::Array{Float64,2};return_timings=false)
    if return_timings
        # Timing-variable names follow the Array{Float64,1} variant below:
        # `matching_time` = bipartite rounding, `scoring_time` = triangle count.
        (_,_,matching,_) ,matching_time = @timed bipartite_matching_primal_dual(X)
        (triangle_count, gaped_triangles,inverted_matching), scoring_time = @timed TAME_score(A,B,Dict(j => i for (i,j) in enumerate(matching)))
        # bug fix: previously returned `matching_time` twice and dropped the
        # scoring time entirely.
        return triangle_count, gaped_triangles,inverted_matching, matching_time, scoring_time
    else
        _,_,matching,_ = bipartite_matching_primal_dual(X)
        return TAME_score(A,B,Dict(j => i for (i,j) in enumerate(matching)))
    end
end
"""
Score a vectorized alignment `x` (a flattened m×n similarity matrix over
`A.cubical_dimension` × `B.cubical_dimension`): reshape, round it to a
matching, and count the aligned triangles.  With `return_timings=true`
the matching and scoring wall-clock times are returned as well.
"""
function TAME_score(A::COOTen,B::COOTen,x::Array{Float64,1};return_timings=false)
    if return_timings
        (_,_,matching) ,matching_time = @timed bipartite_matching_primal_dual(reshape(x,A.cubical_dimension,B.cubical_dimension))
        (triangle_count, gaped_triangles,inverted_matching), scoring_time = @timed TAME_score(A,B,Dict(j => i for (i,j) in enumerate(matching)))
        return triangle_count, gaped_triangles,inverted_matching, matching_time,scoring_time
    else
        _,_,matching = bipartite_matching_primal_dual(reshape(x,A.cubical_dimension,B.cubical_dimension))
        return TAME_score(A,B,Dict(j => i for (i,j) in enumerate(matching)))
    end
end
"""
Count the triangles aligned between `A` and `B` under the vertex matching
`Match_mapping`.  The smaller triangle list is scanned while the larger
one is hashed; unmatched vertices map to -1, which can never hit a stored
triangle.  Returns `(triangle_count, gaped_triangles, Match_mapping)`;
note that when `A` has more stored triangles than `B`, the returned
mapping has been inverted (B-index => A-index).
"""
function TAME_score(A::COOTen,B::COOTen,Match_mapping::Dict{Int,Int})
    match_len = length(Match_mapping)  # NOTE(review): currently unused
    Triangle_check = Dict{Array{Int,1},Int}()
    gaped_triangles = 0
    triangle_count = 0
    if A.unique_nnz > B.unique_nnz
        # hash A's triangles and scan B's
        for i in 1:A.unique_nnz
            Triangle_check[A.indices[i,:]] = 1
        end
        #invert to map v indices to u
        Match_mapping = Dict(value => key for (key, value) in Match_mapping)
        for i in 1:B.unique_nnz
            v_i,v_j,v_k = B.indices[i,:]
            # sort so the image triple matches the stored (sorted) key order
            matched_triangle =
              sort([get(Match_mapping,v_i,-1),get(Match_mapping,v_j,-1),get(Match_mapping,v_k,-1)])
       #     println(B.indices[i,:]," -> ",matched_triangle)
            match = get(Triangle_check,matched_triangle,0)
            if match == 1
                triangle_count += 1
            else
                gaped_triangles += 1
            end
        end
    else
        # hash B's triangles and scan A's
        for i in 1:B.unique_nnz
            Triangle_check[B.indices[i,:]] = 1
        end
        for i in 1:A.unique_nnz
            v_i,v_j,v_k = A.indices[i,:]
            matched_triangle =
               sort([get(Match_mapping,v_i,-1), get(Match_mapping,v_j,-1),get(Match_mapping,v_k,-1)])
            match = get(Triangle_check,matched_triangle,0)
            if match == 1
                triangle_count += 1
            else
                gaped_triangles += 1
            end
        end
    end
   # triangles, triangle_weight = count_triangles(sub_A,sub_B)
    return triangle_count, gaped_triangles, Match_mapping
end
# Count how many of `Input_tensor`'s triangles map (under `Match_mapping`)
# onto a triangle recorded in `Triangle_Dict`.  Unmatched vertices map to
# -1, which never matches a stored triangle.  Returns
# (aligned count, non-aligned count, Match_mapping).
function TAME_score(Triangle_Dict::Dict{Array{Int,1},Int},Input_tensor::COOTen,
                    Match_mapping::Dict{Int,Int})
    aligned = 0
    missed = 0
    for idx in 1:Input_tensor.unique_nnz
        a, b, c = Input_tensor.indices[idx,:]
        image = sort([get(Match_mapping,a,-1), get(Match_mapping,b,-1), get(Match_mapping,c,-1)])
        if get(Triangle_Dict, image, 0) == 1
            aligned += 1
        else
            missed += 1
        end
    end
    return aligned, missed, Match_mapping
end
"""
Search every pair of columns (iterates) from the Krylov subspaces `U` and
`V` for the rank-1 alignment maximizing the aligned-triangle count.
Returns `(best_score, best_i, best_j, best_matching)` where `best_i`,
`best_j` are the column indices of the winning pair.
"""
function search_Krylov_space(A::COOTen,B::COOTen,U::Array{Float64,2},V::Array{Float64,2})
    best_score = -1
    best_i = -1
    best_j = -1
    best_matching = Dict{Int,Int}()
    # hash the larger tensor's triangles once; scan the smaller each trial
    Triangle_check = Dict{Array{Int,1},Int}()
    if A.unique_nnz > B.unique_nnz
        for i in 1:A.unique_nnz
            Triangle_check[A.indices[i,:]] = 1
        end
        Input_tensor = B
    else
        for i in 1:B.unique_nnz
            Triangle_check[B.indices[i,:]] = 1
        end
        Input_tensor = A
    end
    for i in 1:size(U,2)
        for j in 1:size(V,2)
            # NOTE(review): this calls a 4-argument TAME_score(dict, tensor,
            # vec, vec) method defined elsewhere in the package — the column
            # order is swapped when A is the larger tensor.
            if A.unique_nnz > B.unique_nnz
                matched_tris, gaped_tris, matching = TAME_score(Triangle_check,Input_tensor,V[:,j],U[:,i])
            else
                matched_tris, gaped_tris, matching = TAME_score(Triangle_check,Input_tensor,U[:,i],V[:,j])
            end
            if matched_tris > best_score
                best_matching = matching
                best_score = matched_tris
                best_i = i
                best_j = j
            end
        end
    end
    return best_score, best_i, best_j, best_matching
end
#===============================================================================
TAME Implementations Code
===============================================================================#
#
# Parameter Searching Code
#
"""
Align tensors `A` and `B` with the requested TAME variant
(`"LambdaTAME"`, `"LowRankTAME"`, or `"TAME"`).  The larger tensor is
always placed first; when the inputs are swapped to enforce this, the
returned matching is inverted so it still maps A-indices to B-indices.
Remaining keyword arguments are forwarded to the parameter search.
"""
function align_tensors(A::COOTen,B::COOTen;method::String="LambdaTAME",
                       no_matching=false,kwargs...)
    #put larger tensor on the left
    if B.n > A.n
        results = align_tensors(B,A;method = method, no_matching=no_matching,kwargs...)
        #flip the matchings if A and B were swapped
        if method == "LambdaTAME" || method == "LowRankTAME"
            best_TAME_PP_tris, max_triangle_match, U_best, V_best, best_matching = results
            return best_TAME_PP_tris, max_triangle_match, U_best, V_best, Dict((j,i) for (i,j) in best_matching)
        elseif method == "TAME"
            best_TAME_PP_tris, max_triangle_match, best_TAME_PP_x, best_matching = results
            return best_TAME_PP_tris, max_triangle_match, best_TAME_PP_x, Dict((j,i) for (i,j) in best_matching)
        end
    end
    if method == "LambdaTAME"
        return ΛTAME_param_search(A,B;kwargs...)
    elseif method == "LowRankTAME"
        return LowRankTAME_param_search(A,B;no_matching = no_matching,kwargs...)
    elseif method == "TAME"
        return TAME_param_search(A,B;no_matching = no_matching,kwargs...)
    else
        throw(ArgumentError("method must be one of 'LambdaTAME', 'LowRankTAME',or 'TAME'."))
    end
end
"""
Profiled counterpart of `align_tensors`: identical dispatch and
swap-and-invert logic, but each variant additionally returns its
per-experiment profiling data as the final element.
"""
function align_tensors_profiled(A::COOTen,B::COOTen; method::String="LambdaTAME",
                                no_matching=false,kwargs...)
    #put larger tensor on the left
    if B.n > A.n
        results = align_tensors_profiled(B,A;method = method, no_matching=no_matching,kwargs...)
        #flip the matchings if A and B were swapped
        if method == "LambdaTAME" || method == "LowRankTAME"
            best_TAME_PP_tris, max_triangle_match, U_best, V_best, best_matching,profile = results
            return best_TAME_PP_tris, max_triangle_match, U_best, V_best, Dict((j,i) for (i,j) in best_matching), profile
        elseif method == "TAME"
            best_TAME_PP_tris, max_triangle_match, best_TAME_PP_x, best_matching,profile = results
            return best_TAME_PP_tris, max_triangle_match, best_TAME_PP_x, Dict((j,i) for (i,j) in best_matching), profile
        end
    end
    if method == "LambdaTAME"
        return ΛTAME_param_search_profiled(A,B;kwargs...)
    elseif method == "LowRankTAME"
        return LowRankTAME_param_search_profiled(A,B;no_matching = no_matching,kwargs...)
    elseif method == "TAME"
        return TAME_param_search_profiled(A,B;no_matching = no_matching,kwargs...)
    else
        throw(ArgumentError("method must be one of 'LambdaTAME', 'LowRankTAME', or 'TAME'."))
    end
end
"""
Grid search over the mixing (`alphas`) and shift (`betas`) parameters for
ΛTAME, timing each power-iteration run and each Krylov-subspace search.
Returns `(best_tris, max_triangle_match, best U column, best V column,
best_matching, timing dictionary)`.
"""
function ΛTAME_param_search_profiled(A::COOTen,B::COOTen; iter::Int = 15,tol::Float64=1e-6,
                                     alphas::Array{F,1}=[.5,1.0],
                                     betas::Array{F,1} =[1000.0,100.0,10.0,1.0,0.0,0.1,0.01,0.001]) where {F <: AbstractFloat}
    max_triangle_match = min(size(A.indices,1),size(B.indices,1))
    best_TAME_PP_tris = -1
    best_i = -1
    best_j = -1
    best_matching = Dict{Int,Int}()
    # bug fix: the original branched on `Ten == COOTen`, but `Ten` is not
    # defined anywhere in this method (both arguments are typed COOTen),
    # which raised an UndefVarError at runtime.  Use the COOTen field directly.
    m = A.cubical_dimension
    n = B.cubical_dimension
    results = Dict(
        "TAME_timings" => Array{Float64,1}(undef,length(alphas)*length(betas)),
        "Krylov Timings"=> Array{Float64,1}(undef,length(alphas)*length(betas))
    )
    exp_index = 1
    U = Array{Float64,2}(undef,m,iter)
    V = Array{Float64,2}(undef,n,iter)
    for α in alphas
        for beta in betas
            ((U,V),runtime) = @timed ΛTAME(A,B,beta,iter,tol,α)
            results["TAME_timings"][exp_index] = runtime
            #search the Krylov Subspace for the best rank-1 column pairing
            ((search_tris, i, j, matching),runtime) = @timed search_Krylov_space(A,B,U,V)
            results["Krylov Timings"][exp_index] = runtime
            exp_index += 1
            if search_tris > best_TAME_PP_tris
                best_matching = matching
                best_TAME_PP_tris = search_tris
                best_i = i
                best_j = j
            end
            println("α:$(α) -- β:$(beta) finished -- tri_match:$search_tris -- max_tris $(max_triangle_match) -- best tri_match: $best_TAME_PP_tris")
        end
    end
    println("best i:$best_i -- best j:$best_j")
    # NOTE(review): U and V hold the iterates of the *last* (α,β) run only,
    # so the returned columns correspond to best_i/best_j of that run —
    # confirm this is the intended behavior.
    return best_TAME_PP_tris, max_triangle_match, U[:,best_i], V[:,best_j], best_matching, results
end
#add in SparseSymmetricTensors.jl function definitions
"""
Grid search over (α, β) for full TAME.  Each run starts from a uniform
alignment matrix; the best iterate (by aligned-triangle count) and its
matching are kept.  Returns
`(best_tris, max_triangle_match, best_x, best_matching)`.
"""
function TAME_param_search(A::COOTen,B::COOTen;iter::Int = 15,tol::Float64=1e-6,
                           alphas::Array{F,1}=[.5,1.0],
                           betas::Array{F,1} =[1000.0,100.0,10.0,1.0,0.0,0.1,0.01,0.001],
                           kwargs...) where {F <: AbstractFloat}
    max_triangle_match = min(size(A.indices,1),size(B.indices,1))
    total_triangles = size(A.indices,1) + size(B.indices,1)  # NOTE(review): unused
    best_TAME_PP_tris::Int = -1
    best_matching = Dict{Int,Int}()
    m = A.cubical_dimension
    n = B.cubical_dimension
    best_TAME_PP_x = Array{Float64,2}(undef,m,n)
    for α in alphas
        for β in betas
            # W = ones(m,n): uniform starting alignment for every run
            x, triangle_count, matching = TAME(A,B,β,iter,tol,α;W = ones(m,n),kwargs...)
            if triangle_count > best_TAME_PP_tris
                best_matching = matching
                best_TAME_PP_tris = triangle_count
                best_TAME_PP_x = copy(x)
            end
            println("α:$(α) -- β:$β finished -- tri_match:$(triangle_count) -- max_tris $(max_triangle_match) -- best tri_match:$(best_TAME_PP_tris)")
        end
    end
    return best_TAME_PP_tris, max_triangle_match, best_TAME_PP_x, best_matching
end
"""
Profiled counterpart of `TAME_param_search`: identical grid search, but
collects each run's experiment profile (keyed by its "α:…_β:…" label) and
returns it as the final element.
"""
function TAME_param_search_profiled(A::COOTen,B::COOTen;iter::Int = 15,tol::Float64=1e-6,
                                    alphas::Array{F,1}=[.5,1.0],
                                    betas::Array{F,1} =[1000.0,100.0,10.0,1.0,0.0,0.1,0.01,0.001],
                                    profile::Bool=false,profile_aggregation="all",
                                    kwargs...) where {F <: AbstractFloat}
    max_triangle_match = min(size(A.indices,1),size(B.indices,1))
    total_triangles = size(A.indices,1) + size(B.indices,1)  # NOTE(review): unused
    best_TAME_PP_tris = -1
    best_matching = Dict{Int,Int}()
    m = A.cubical_dimension
    n = B.cubical_dimension
    best_TAME_PP_x = Array{Float64,2}(undef,m,n)
    # one (label, profile-dict) entry per (α, β) combination
    experiment_profiles = Array{Tuple{String,Dict{String,Union{Array{Float64,1},Array{Array{Float64,1},1}}}},1}(undef,0)
    for α in alphas
        for β in betas
            x, triangle_count, matching, experiment_profile = TAME_profiled(A,B,β,iter,tol,α;W = ones(m,n),kwargs...)
            push!(experiment_profiles,("α:$(α)_β:$(β)",experiment_profile))
            if triangle_count > best_TAME_PP_tris
                best_matching = matching
                best_TAME_PP_tris = triangle_count
                best_TAME_PP_x = copy(x)
            end
            println("α:$(α) -- β:$β finished -- tri_match:$(best_TAME_PP_tris) -- max_tris $(max_triangle_match)")
        end
    end
    return best_TAME_PP_tris, max_triangle_match, best_TAME_PP_x, best_matching, experiment_profiles
end
"""
Grid search over (α, β) for LowRankTAME, starting each run from the
rank-1 factors `U_0`, `V_0` (all-ones by default).  Keeps the factor
pair with the highest aligned-triangle count.  Returns
`(best_tris, max_triangle_match, best_U, best_V, best_matching)`.
"""
function LowRankTAME_param_search(A::COOTen,B::COOTen;iter::Int = 15,tol::Float64=1e-6,
                                  U_0::Array{Float64,2} = ones(A.n,1), V_0::Array{Float64,2} = ones(B.n,1),
                                  alphas::Array{F,1}=[.5,1.0],
                                  betas::Array{F,1} =[1000.0,100.0,10.0,1.0,0.0,0.1,0.01,0.001],
                                  kwargs...) where {F <: AbstractFloat}
    max_triangle_match = min(size(A.indices,1),size(B.indices,1))
    total_triangles = size(A.indices,1) + size(B.indices,1)  # NOTE(review): unused
    best_TAME_PP_tris = -1
    best_matching = Dict{Int,Int}()
    m = A.cubical_dimension
    n = B.cubical_dimension
    best_TAME_PP_U = ones(m,1)
    best_TAME_PP_V = ones(n,1)
    for α in alphas
        for β in betas
            U, V, triangle_count,matching =
                LowRankTAME(A,B,U_0,V_0,β,iter,tol,α;kwargs...)
            if triangle_count > best_TAME_PP_tris
                best_TAME_PP_tris = triangle_count
                best_matching = matching
                best_TAME_PP_U = copy(U)
                best_TAME_PP_V = copy(V)
            end
            println("α:$(α) -- β:$(β) -- tri_match:$(triangle_count) -- max_tris:$(max_triangle_match) -- best tri match:$best_TAME_PP_tris")
        end
    end
    return best_TAME_PP_tris, max_triangle_match, best_TAME_PP_U, best_TAME_PP_V, best_matching
end
#
# Spectral Relaxation Code
#
"""
ΛTAME spectral-relaxation power iteration.  Runs simultaneous shifted
tensor power iterations on `A` and `B`, storing every normalized iterate
as a column of `U` and `V`, and stops when the product of the two
Rayleigh-quotient estimates stabilizes to within `tol`, or after
`max_iter` iterations.  Returns the matrices of iterates `(U, V)`.
Setting `update_user > 0` prints progress every `update_user` iterations.
"""
function ΛTAME(A::COOTen, B::COOTen, β::Float64, max_iter::Int,
               tol::Float64,α::Float64;update_user::Int=-1)
    U = zeros(A.cubical_dimension,max_iter+1)
    V = zeros(B.cubical_dimension,max_iter+1) #store initial in first column
    U[:,1] = ones(A.cubical_dimension)
    U[:,1] /=norm(U[:,1])
    V[:,1] = ones(B.cubical_dimension)
    # bug fix: this previously divided by norm(U[:,1]) — which is 1.0 after
    # the line above — leaving V's starting column unnormalized.
    V[:,1] /=norm(V[:,1])
    sqrt_β = β^(.5)
    lambda = Inf
    i = 1
    while true
        U[:,i+1] = contract_k_1(A,U[:,i])
        V[:,i+1] = contract_k_1(B,V[:,i])
        # Rayleigh-quotient estimates for each tensor's dominant eigenvalue
        lambda_A = (U[:,i+1]'*U[:,i])
        lambda_B = (V[:,i+1]'*V[:,i])
        new_lambda = lambda_A*lambda_B
        if β != 0.0
            # NOTE(review): this adds sqrt_β times the *new* iterate (a pure
            # rescaling) rather than sqrt_β times the previous iterate
            # U[:,i] — confirm against the TAME shift definition.
            U[:,i+1] .+= sqrt_β*U[:,i+1]
            V[:,i+1] .+= sqrt_β*V[:,i+1]
        end
        if α != 1.0
            # mix with the (uniform) starting vector
            U[:,i+1] = α*U[:,i+1] + (1 -α)*U[:,1]
            V[:,i+1] = α*V[:,i+1] + (1 -α)*V[:,1]
        end
        U[:,i+1] ./= norm(U[:,i+1])
        V[:,i+1] ./= norm(V[:,i+1])
        if update_user != -1 && i % update_user == 0
            println("iteration $(i) λ_A: $(lambda_A) -- λ_B: $(lambda_B) -- newλ: $(new_lambda)")
        end
        if abs(new_lambda - lambda) < tol || i >= max_iter
            return U[:,1:i], V[:,1:i]
        else
            lambda = new_lambda
            i += 1
        end
    end
end
#=
TODO: currently missing COOTen Implementations for
- LowRankTAME
- LowRankTAME_profiled
- TAME
- TAME_profiled
=#
#===============================================================================
PostProcessing Code
===============================================================================#
"""-----------------------------------------------------------------------------
Computes a post processing routine on the optimal aligment produced by
Λ-TAME. The algorithm runs a b-matching to find suitable replacements
and computes whether or not to make the swap if the number of aligned
triangles increases.
Inputs:
-------
{A,B} - (COOTen)
Coordinate representations of adjacency tensors for the motif induced
hypergraphs.
{u,v} - (Array{Float64,1})
The components to the rank-1 alignment matrix which offered the best
alignment scores.
iterations - (Int)
The number of iterations to run the post-processing algorithm for.
b - (Int)
The size of the b-matching.
-----------------------------------------------------------------------------"""
# See the docstring immediately above for the algorithm description.
function post_process(A::COOTen,B::COOTen,u::Array{Float64,1},
                      v::Array{Float64,1},iterations::Int,b::Int)
    @assert length(u) <= length(v)
    # consistency fix: rank_one_matching returns (mapping, weight) — see the
    # rank-1 TAME_score variant; bind only the mapping instead of the tuple.
    Matching, _ = rank_one_matching(u,v)
    A_incidence = produce_triangle_incidence(A.indices,A.cubical_dimension)
    B_incidence = produce_triangle_incidence(B.indices,B.cubical_dimension)
    potential_matchings = b_matching(u,v,b)
    u_sum = sum(u)
    v_sum = sum(v)
    match_q = Queue{Tuple{Int,Int,Float64}}()
    for _ in 1:iterations
        #sort by ∑_i x(ii') + ∑_i' x(ii')
        # bug fix: the queue was declared as `match_q`, but the original code
        # enqueued into and drained an undefined `q` (UndefVarError).
        [enqueue!(match_q,z) for z in sort(potential_matchings,by=x->u[x[1]]*u_sum+ v[x[2]]*v_sum)]
        while length(match_q) != 0
            i,ip,_ = dequeue!(match_q)
            matched_tris = compute_triangles_aligned(A_incidence[i],B_incidence[ip],Matching)
            #TODO: optimize this later
            Pref_i = [k for (j,k,_) in potential_matchings if j == i]
            Pref_ip = [k for (j,k,_) in potential_matchings if j == ip]
        end
    end
end
|
{"hexsha": "f5da7e40eee7976c5a29fbd198d815ed95254310", "size": 18825, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/SparseSymmetricTensorCode.jl", "max_stars_repo_name": "chuckcol/LambdaTAME", "max_stars_repo_head_hexsha": "5c2192270def06a078cc4821c52b7905b937c0fa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/SparseSymmetricTensorCode.jl", "max_issues_repo_name": "chuckcol/LambdaTAME", "max_issues_repo_head_hexsha": "5c2192270def06a078cc4821c52b7905b937c0fa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-16T14:30:06.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-16T15:39:36.000Z", "max_forks_repo_path": "src/SparseSymmetricTensorCode.jl", "max_forks_repo_name": "chuckcol/LambdaTAME", "max_forks_repo_head_hexsha": "5c2192270def06a078cc4821c52b7905b937c0fa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4369449378, "max_line_length": 150, "alphanum_fraction": 0.5782735724, "num_tokens": 5273}
|
import random
from typing import Dict, List, Optional, Tuple, Union
import networkx
import networkx as nx
import numpy as np
import pandas as pd
from ipycytoscape import CytoscapeWidget
import halerium.core as hal
def show_hal_graph(g: hal.Graph) -> CytoscapeWidget:
    """Render a halerium graph's variable dependencies as a Cytoscape widget."""
    return show_graph(dependencies_from_hal_graph(g))
def show_graph(
    dependencies: List[Tuple[str, str]]
) -> Union[Tuple[Dict, List], CytoscapeWidget]:
    """Build an interactive CytoscapeWidget from a list of (source, target) edges.

    :param dependencies: directed edges to display
    :return: the configured widget
    """
    # Bug fix: the second positional argument of get_cytoscape_params is
    # `nodes`; passing `True` there made the node comprehension iterate over
    # a bool and raise a TypeError.  The intent was the jupyter element format.
    elements, style, layout = get_cytoscape_params(dependencies, for_jupyter=True)
    cytoscapeobj = CytoscapeWidget()
    cytoscapeobj.graph.add_graph_from_json(elements)
    cytoscapeobj.set_style(style)
    # NOTE(review): assigning `cytoscape_layout` mirrors the original code —
    # confirm this attribute (rather than e.g. set_layout) is what
    # ipycytoscape expects.
    cytoscapeobj.cytoscape_layout = layout
    return cytoscapeobj
def get_cytoscape_params(
    edgelist: List[Tuple], nodes: Optional[List[str]] = None, for_jupyter: bool = False
):
    """Translate an edge list into Cytoscape (elements, style, layout) parameters.

    :param edgelist: directed (source, target) pairs
    :param nodes: explicit node list; derived from the edges when omitted
    :param for_jupyter: emit elements as a {"nodes", "edges"} dict instead of a flat list
    :return: (elements, style, layout) ready for a Cytoscape widget
    """
    edges = []
    for src, tgt in edgelist:
        edges.append({"data": {"id": f"{src}->{tgt}", "source": src, "target": tgt}})
    if nodes is None:
        # collect every endpoint appearing in the edge list
        node_iter = set(sum(edgelist, ()))
    else:
        node_iter = nodes
    node_elements = [{"data": {"id": name}} for name in node_iter]
    style = [
        {"selector": "core", "style": {"background": "blue"}},
        {
            "selector": "node",
            "style": {
                "label": "data(id)",
                "background-color": "gray",
                "font-size": "2em",
                "text-valign": "center",
                "text-halign": "center",
            },
        },
        {
            "selector": "edge",
            "style": {
                "curve-style": "bezier",
                "target-arrow-shape": "triangle",
                "width": 5,
                # "line-color": "#ddd",
                "background-fill": "linear-gradient(yellow, darkgray)",
                "target-arrow-color": "darkgray",
            },
        },
    ]
    layout = {"name": "breadthfirst", "directed": True, "circle": True}
    if for_jupyter:
        return {"nodes": node_elements, "edges": edges}, style, layout
    return node_elements + edges, style, layout
def get_node_name(variable_name: str) -> str:
    """Strip the ``graph/var_`` marker from a halerium variable name."""
    # replace() (not removeprefix) keeps the original behavior: every
    # occurrence of the marker is removed, not just a leading one.
    return variable_name.replace("graph/var_", "")
def is_regular_variable(variable_name: str) -> bool:
    """Return True if the name denotes a regular graph variable (``graph/var_*``)."""
    prefix = "graph/var_"
    return variable_name[: len(prefix)] == prefix
def dependencies_from_hal_graph(g: hal.Graph) -> List:
    """Extract (source, target) dependencies between regular variables of a halerium graph."""
    visualizer = hal.gui.GraphVisualizer(g)
    graph_json = dict(visualizer._get_json())
    return [
        (get_node_name(edge["source"]), get_node_name(edge["target"]))
        for edge in graph_json["variable_dependencies"]["graph"]
        if is_regular_variable(edge["source"]) and is_regular_variable(edge["target"])
    ]
def dependencies_to_adjacency_matrix(
    deps: List[List], columns: List[str]
) -> pd.DataFrame:
    """Build an integer adjacency matrix from a dependency list.

    :param deps: entries of the form ``[source, targets]`` where ``targets``
        is a node name or a list of node names reachable from ``source``
    :param columns: all node names, used as the column (and implicit row) set
    :return: DataFrame with a 1 at (source, target) for every edge, 0 elsewhere
    """
    # seed one row per column so nodes without outgoing edges still appear
    adjacency_rows = {name: {name: 0} for name in columns}
    for src, targets in deps:
        assert isinstance(src, str), "multiple sources are currently not supported"
        if not isinstance(targets, list):
            targets = [targets]
        row = dict(adjacency_rows.get(src, {}))
        for target in targets:
            row[target] = 1
        adjacency_rows[src] = row
    frame = pd.DataFrame.from_dict(adjacency_rows, orient="index", columns=columns)
    # missing entries become 0; cast from float (introduced by NaN) back to int
    return frame.fillna(0).astype(int)
def adjacency_matrix_to_deps(mat: np.array, topo: List[str], keep_empty: bool = False) -> List:
    """
    Convert an adjacency matrix to a list of directed edges.

    Each edge is a ``(source, target)`` tuple; with ``keep_empty`` the result
    additionally contains a ``(node, [])`` entry for every node that has no
    outgoing edges.

    :param mat: the adjacency matrix as a 2D numpy array
    :param topo: a list of nodes sorted in topological order (row/column labels)
    :param keep_empty: whether to append nodes with no outgoing edges as well
    :return: a list of edges
    """
    edges = [(topo[r], topo[c]) for r, c in np.argwhere(mat > 0)]
    if keep_empty:
        used_nodes = {src for src, _ in edges}
        # Bug fix: the original condition was `if node if node not in used_nodes`,
        # whose stray truthiness test silently dropped falsy node labels.
        edges += [(node, []) for node in topo if node not in used_nodes]
    return edges
def adjacency_matrix_to_deps2(df_mat: pd.DataFrame, row_per_edge: bool = True, keep_empty: bool = False) -> List:
    """
    Convert an adjacency-matrix DataFrame to a list of dependencies.

    A dependency is a list of length 2: the source node followed by the list
    of its target nodes.  With ``row_per_edge`` (the default) the result is
    flattened into one ``(source, target)`` tuple per edge instead.

    :param df_mat: the adjacency matrix as pandas DataFrame
    :param row_per_edge: flatten the dependency list into edge tuples
    :param keep_empty: keep sources without any outgoing edge (dependency form only)
    :return: a list of dependencies or edge tuples
    """
    dep_list = []
    for src, adjacency in df_mat.iterrows():
        if keep_empty or sum(adjacency) > 0:
            dep_list.append([src, adjacency[adjacency > 0].index.tolist()])
    if not row_per_edge:
        return dep_list
    return [(src, target) for src, targets in dep_list for target in targets]
# def generate_random_dag(
# nodes: List, p: float, as_dataframe: bool = True
# ) -> Union[np.ndarray, pd.DataFrame]:
def generate_random_dag(nodes: List, p: float) -> Tuple[np.ndarray, List[str]]:
    """Generate a random DAG over ``nodes`` with edge probability ``p``.

    Returns the integer adjacency matrix with rows/columns sorted in
    topological order, together with that topological ordering of the
    (randomly shuffled) node labels.  Not deterministic: depends on the
    global ``random`` module state and networkx's RNG.
    """
    # approach found on stack overflow: https://stackoverflow.com/questions/13543069/how-to-create-random-single-source-random-acyclic-directed-graphs-with-negative
    n = len(nodes)
    random_graph = networkx.fast_gnp_random_graph(n, p, directed=True)
    # keeping only edges i -> j with i < j guarantees acyclicity
    random_dag = networkx.DiGraph([(i, j) for (i, j) in random_graph.edges() if i < j])
    # due to the filtering nodes can get lost -> ensure resulting DAG has the desired number of nodes
    if random_dag.number_of_nodes() < n:
        random_dag.add_nodes_from(random_graph.nodes)
    # validate resulting DAG. An invalid DAG means there is a major bug in the creation
    if not networkx.is_directed_acyclic_graph(random_dag):
        raise ValueError("Created graph is not a valid DAG")
    # assign random labels to generated nodes in DAG
    rand_nodes = nodes[:] # create shallow copy to avoid altering input argument
    random.shuffle(rand_nodes)
    node_mapping = {i: n for i, n in enumerate(rand_nodes)}
    # sorted_nodes = sorted(nodes)
    random_dag = networkx.relabel_nodes(random_dag, node_mapping)
    # build the adjacency as a DataFrame so rows/columns can be reordered topologically
    df = networkx.convert_matrix.to_pandas_adjacency(random_dag, nodes).astype(int)
    topology = list(nx.topological_sort(random_dag))
    df_sorted = df.loc[topology][topology]
    return df_sorted.values, topology
    # return (
    #     networkx.convert_matrix.to_numpy_array(random_dag, nodes).astype(int),
    #     rand_nodes,
    # )
|
{"hexsha": "cb8e599c2d0d4c8395a72a32e900426f2ed176fd", "size": 7095, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/utils.py", "max_stars_repo_name": "Magier/HalGA", "max_stars_repo_head_hexsha": "db560ab219824f0e0556450becc9226d3b4949f3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/utils.py", "max_issues_repo_name": "Magier/HalGA", "max_issues_repo_head_hexsha": "db560ab219824f0e0556450becc9226d3b4949f3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils.py", "max_forks_repo_name": "Magier/HalGA", "max_forks_repo_head_hexsha": "db560ab219824f0e0556450becc9226d3b4949f3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9473684211, "max_line_length": 164, "alphanum_fraction": 0.6405919662, "include": true, "reason": "import numpy,import networkx", "num_tokens": 1755}
|
from sympy.combinatorics import Permutation
from sympy.core import Basic
from sympy.combinatorics.permutations import perm_af_mul, \
_new_from_array_form, perm_af_commutes_with, perm_af_invert, perm_af_muln
from random import randrange, choice
from sympy.functions.combinatorial.factorials import factorial
from math import log
from sympy.ntheory import isprime, sieve
from sympy.combinatorics.util import _check_cycles_alt_sym,\
_distribute_gens_by_base, _orbits_transversals_from_bsgs,\
_handle_precomputed_bsgs, _base_ordering, _strong_gens_from_distr, _strip
def _smallest_change(h, alpha):
"""
find the smallest point not fixed by `h`
"""
for i in range(alpha, len(h)):
if h[i] != i:
return i
class _Vertex(object):
"""
vertex of JGraph
neighbor list of neighbor vertices
perm list of permutations
index_neighbor list of index position of vertices in neighbor
if vertex j is a neighbor of vertex i, then
vertex[i].index_neighbor[ vertex[i].neighbor[j] ] = j
if vertex j is not a neighbor of vertex i,
vertex[i].index_neighbor[j] = -1
n size of permutation
"""
def __init__(self, n):
self.neighbor = []
self.perm = []
self.index_neighbor = [-1]*n
class _JGraph(object):
    """
    Jerrum graph

    vertex      vertices of the Jerrum graph for permutation group G < S(n);
                vertex[i] is the i-th vertex, with `i` in range(n)
    jg          array of Jerrum's generators (edges of the graph)
    jgs         number of occupied entries of jg
    freejg      stack of slots of jg; freejg[i] points to the
                i-th free position of jg

    To a directed edge (i, j) between vertices i, j
    it is associated the index of a permutation `g` satisfying
    g[i] = j
    g = jg[vertex[i].perm[vertex[i].index_neighbor[j]]]
      = jg[vertex[j].perm[vertex[j].index_neighbor[i]]]

    cycle       list of indices of vertices used to identify cycles
    G           Permutation group
    n           size of permutation
    r           number of generators
    """
    def __init__(self, G):
        n = G._degree
        # one vertex per point the group acts on
        self.vertex = [_Vertex(n) for i in range(n)]
        # generators kept in array form for fast composition
        self.gens = [p.array_form for p in G._generators]
        self.jg = [None]*n
        self.jgs = 0
        self.freejg = []
        self.cycle = []
        self.G = G
        self.n = G._degree
        self.r = G._r
        # identity permutation in array form (Python 2: `range` is a list)
        self.idn = range(n)

    def find_cycle(self, v, v1, v2, prev):
        """
        find if there is a cycle

        v       vertex from which start searching a cycle
        v1, v2  vertices through which the cycle must go
        prev    previous vertex

        Returns True if a cycle through v1 or v2 is found; the vertices
        visited on the path are accumulated in self.cycle as a side effect.
        """
        cycle = self.cycle
        neighbor = self.vertex[v].neighbor
        # a direct edge back to v1 (other than the edge we came from)
        # closes a cycle
        if v1 in neighbor:
            if v1 != prev:
                return True
        # try to close the cycle through v2, recording it on the path
        if v2 in neighbor:
            if v2 != prev:
                cycle.append(v2)
                if self.find_cycle(v2, v1, v2, v):
                    return True
                cycle.pop()
        # otherwise continue the depth-first search over the neighbors
        for u in neighbor:
            if u != prev:
                cycle.append(u)
                if self.find_cycle(u, v1, v2, v):
                    return True
                cycle.pop()
        return False

    def insert(self, g, alpha):
        """
        insert permutation `g` in stabilizer chain at point alpha

        Implements one step of Jerrum's filter: either merge `g` with an
        existing parallel edge, or add a new edge and, if that created a
        cycle, break the cycle and re-insert the cycle product.
        """
        n = len(g)  # NOTE(review): unused; kept for fidelity
        if not g == self.idn:
            vertex = self.vertex
            jg = self.jg
            # smallest point moved by g and its image; i < g[i] always holds
            # because all points below i are fixed
            i = _smallest_change(g, alpha)
            ig = g[i]
            nn = vertex[i].index_neighbor[ig]
            if nn >= 0: # if ig is already neighbor of i
                jginn = jg[vertex[i].perm[nn]]
                if g != jginn:
                    # cycle consisting of two edges;
                    # replace jginn by g and insert h = g**-1*jginn
                    g1 = perm_af_invert(g)
                    h = perm_af_mul(g1, jginn)
                    jg[ vertex[i].perm[nn] ] = g
                    self.insert(h, alpha)
            else: # new edge
                self.insert_edge(g, i, ig)
                self.cycle = [i]
                if self.find_cycle(i, i, ig, -1):
                    cycle = self.cycle
                    cycle.append(cycle[0])
                    # find the smallest point (vertex) of the cycle
                    minn = min(cycle)
                    cmin = cycle.index(minn)
                    # now walk around the cycle starting from the smallest
                    # point, and multiply around the cycle to obtain h
                    # satisfying h[cmin] = cmin
                    ap = []
                    for c in range(cmin, len(cycle)-1) + range(cmin):
                        i = cycle[c]
                        j = cycle[c+1]
                        nn = vertex[i].index_neighbor[j]
                        p = jg[ vertex[i].perm[nn] ]
                        if i > j:
                            # edge traversed against its orientation
                            p = perm_af_invert(p)
                        ap.append(p)
                    ap.reverse()
                    h = perm_af_muln(*ap)
                    # break the cycle at its smallest point and re-insert
                    # the product, which fixes all points <= minn
                    self.remove_edge(cycle[cmin], cycle[cmin + 1])
                    self.insert(h, alpha)

    def insert_edge(self, g, i, ig):
        """
        insert edge (permutation g) moving i to ig (i < ig)
        """
        vertex = self.vertex
        self.jgs += 1
        jgslot = self.freejg.pop() # the last free generator place
        self.jg[jgslot] = g
        # register ig as a neighbor of i ...
        nn = len(vertex[i].neighbor)
        vertex[i].neighbor.append(ig)
        vertex[i].perm.append(jgslot)
        vertex[i].index_neighbor[ig] = nn
        # ... and i as a neighbor of ig (the edge is stored symmetrically)
        nn = len(vertex[ig].neighbor)
        vertex[ig].neighbor.append(i)
        vertex[ig].perm.append(jgslot)
        vertex[ig].index_neighbor[i] = nn

    def jerrum_filter(self, alpha, cri):
        """
        filter the generators of the stabilizer subgroup G_alpha

        alpha    point for which the stabilizer is computed
        cri[i]   inverse of G._coset_repr[i] if `i` is not None

        Schreier lemma: the stabilizer subgroup G_alpha of G
        is generated by the Schreier generators
        h = cosrep[ p2[i] ]**-1 * g[j] * cosrep[i]
        where j=0,..,len(gens)-1 and i=0,..,n-1, where n is the degree.
        Proof that h belongs to G_alpha:
        cosrep[k][alpha] = k for all k; cosrep[k]**-1[k] = alpha
        p1 = cosrep[i]; p2 = g[j]
        p3 = cosrep[ p2[i] ]; p3[alpha] = p2[i]
        p3**-1[p2[i]] = alpha
        p3**-1[p2[p1[alpha]]] = alpha, so h[alpha] = alpha

        Using Jerrum's filter one can reduce the len(gens)*n generators
        of G_alpha produced by the Schreier lemma to at most n-1.

        Jerrum's filter:
        (see Cameron 'Permutation groups', page 22)
        _JGraph has n-1 vertices; the edges (i, j) are labelled by
        group elements `g` with j = imin(g) = min(i | g[i] != i);
        define m(graph) = sum(imin(g) for g in graph)
        At the beginning the graph has no edges, so it is
        an acyclic graph.
        Insert a group element `g` produced by the Schreier lemma;
        introduce in _JGraph an edge (imin(g), g[imin(g)]);
        if the graph contains a cycle,
        let `i0` be the smallest point in the cycle, and `h` the
        product of the group elements labelling the edges in the cycle,
        starting from `i0`; h[j] = j for j <= i0;
        modify it eliminating the edge (i0, g0[i0])
        in the cycle; one obtains a new acyclic graph with
        m(graph_new) > m(graph). `g0` can be expressed as a product
        of `h` and the other elements in the cycle.
        Then insert `h` in the graph, and so on.
        Since m < n**2, this process ends after
        a finite number of times, so in the end one remains
        with an acyclic graph, with at most n-1 edges and
        the same number of generators.
        """
        n = self.n
        r = self.r
        G = self.G
        gens = self.gens
        cosrep = G._coset_repr
        # reset the graph: no edges, every jg slot free
        self.jgs = 0
        for j in range(n):
            self.jg[j] = None
        self.freejg = range(n)
        for i in range(n):
            self.vertex[i].neighbor = []
            self.vertex[i].perm = []
        for i in range(n):
            for j in range(n):
                self.vertex[i].index_neighbor[j] = -1
        # insert every Schreier generator h = cri[p2[i]] * p2 * p1
        for i in range(n):
            if cosrep[i] != None:
                p1 = cosrep[i]
                for j in range(r):
                    p2 = gens[j]
                    p3 = cri[ p2[i] ]
                    h = [p3[p2[k]] for k in p1]
                    self.insert(h, alpha)
        # collect the surviving edge labels as the filtered generators
        r = 0
        for j in range(n):
            if self.jg[j] != None:
                gens[r] = self.jg[j]
                r += 1
        self.r = r

    def remove_edge(self, i, ig):
        """
        remove edge from i to ig
        """
        vertex = self.vertex
        # remove the permutation labelling this edge
        self.jgs -= 1
        jgslot = vertex[i].perm[ vertex[i].index_neighbor[ig] ]
        self.jg[jgslot] = None
        self.freejg.append(jgslot) # now we gained a free place
        # drop the edge from both endpoints' adjacency bookkeeping
        for i1, i2 in ((i, ig), (ig, i)):
            v = vertex[i1]
            j0 = v.index_neighbor[i2]
            v.neighbor.pop(j0)
            v.perm.pop(j0)
            # the index of the vertices >= j0 in vertex[i] has changed
            for j in range(j0, len(v.neighbor)):
                v.index_neighbor[ v.neighbor[j] ] = j
            # NOTE(review): on the second pass (i1 == ig) this resets
            # vertex[ig].index_neighbor[ig] (a no-op) and leaves
            # vertex[ig].index_neighbor[i] stale; `v.index_neighbor[i2] = -1`
            # may have been intended.  Current callers only query
            # index_neighbor through the smaller endpoint, so this looks
            # harmless - verify before relying on the invariant.
            v.index_neighbor[ig] = -1

    def schreier_tree(self, alpha, gen):
        """
        traversal of the orbit of alpha

        Compute a traversal of the orbit of alpha, storing the values
        in G._coset_repr; G._coset_repr[i][alpha] = i if i belongs
        to the orbit of alpha.  Implemented as an iterative depth-first
        search with explicit stacks.
        """
        G = self.G
        G._coset_repr[alpha] = gen
        G._coset_repr_n += 1
        genv = self.gens[:self.r]
        h = 0               # current depth in the DFS stacks
        r = self.r
        stg = [gen]         # stack of coset representatives
        sta = [alpha]       # stack of points on the current DFS path
        pos = [0]*self.n    # pos[h]: next generator index to try at depth h
        while 1:
            # backtrack when finished iterating over generators
            if pos[h] >= r:
                if h == 0:
                    return
                pos[h] = 0
                h -= 1
                sta.pop()
                stg.pop()
                continue
            g = genv[pos[h]]
            pos[h] += 1
            alpha = sta[-1]
            ag = g[alpha]
            # descend only to points with no coset representative yet
            if G._coset_repr[ag] == None:
                gen1 = perm_af_mul(g, stg[-1])
                G._coset_repr[ag] = gen1
                G._coset_repr_n += 1
                sta.append(ag)
                stg.append(gen1)
                h += 1
class PermutationGroup(Basic):
"""
The class defining a Permutation group.
Permutation(generator_list) returns the permutation group
generated by permutation_list.
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G
PermutationGroup([Permutation([0, 2, 1]), Permutation([1, 0, 2])])
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
[2] Seress, A.
"Permutation group algorithms"
[3] http://en.wikipedia.org/wiki/Schreier_vector
[4] http://en.wikipedia.org/wiki/Nielsen_transformation
#Product_replacement_algorithm
[5] Frank Celler, Charles R.Leedham-Green, Scott H.Murray,
Alice C.Niemeyer, and E.A.O'Brien. "Generating random
elements of a finite group"
[6] http://en.wikipedia.org/wiki/Block_%28permutation_group_theory%29
[7] http://www.algorithmist.com/index.php/Union_Find
[8] http://en.wikipedia.org/wiki/Multiply_transitive_group#Multiply_transitive_groups
[9] http://en.wikipedia.org/wiki/Center_%28group_theory%29
[10] http://en.wikipedia.org/wiki/Centralizer_and_normalizer
[11] http://groupprops.subwiki.org/wiki/Derived_subgroup
[12] http://en.wikipedia.org/wiki/Nilpotent_group
"""
def __eq__(self, gr):
"""
test if two groups have the same elements
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = [[1,2,0,3,4,5], [1,0,2,3,4,5], [2,1,0,3,4,5], [1,2,0,3,4,5]]
>>> a = [Permutation(p) for p in a]
>>> g = Permutation([1,2,3,4,5,0])
>>> G1,G2,G3 = [PermutationGroup(x) for x in [a[:2],a[2:4],[g,g**2]]]
>>> assert G1.order() == G2.order() == G3.order() == 6
>>> assert G1 == G2 and G1 != G3
"""
if self.degree != gr.degree:
return False
if self.order() != gr.order():
return False
gens1 = self.generators
for g in gens1:
if not gr.has_element(g):
return False
return True
def __mul__(self, other):
    """
    Return the direct product of two permutation groups as a permutation
    group.

    The direct product is realized by shifting the index set of the second
    factor: if G acts on n1 points and H acts on n2 points, then G*H acts
    on n1 + n2 points.

    Examples
    ========
    >>> from sympy.combinatorics.perm_groups import PermutationGroup
    >>> from sympy.combinatorics.named_groups import CyclicGroup
    >>> G = CyclicGroup(5)
    >>> H = G*G
    >>> H
    PermutationGroup([Permutation([1, 2, 3, 4, 0, 5, 6, 7, 8, 9]),
    Permutation([0, 1, 2, 3, 4, 6, 7, 8, 9, 5])])
    >>> H.order()
    25
    """
    n1 = self.degree
    n2 = other.degree
    fixed_low = list(range(n1))             # points fixed by the second factor
    fixed_high = list(range(n1, n1 + n2))   # points fixed by the first factor
    # generators of `self`, extended to fix the last n2 points
    extended_first = [perm.array_form + fixed_high
                      for perm in self.generators]
    # generators of `other`, shifted by n1 and extended to fix the first n1
    extended_second = [fixed_low + [x + n1 for x in perm.array_form]
                       for perm in other.generators]
    gens = [_new_from_array_form(af)
            for af in extended_first + extended_second]
    return PermutationGroup(gens)
def __ne__(self, gr):
return not self == gr
def __new__(cls, *args, **kw_args):
    """
    The default constructor.

    ``args[0]`` is the list of generating permutations.  All generators
    must have the same size; otherwise ValueError is raised.  Most
    derived attributes start out empty/None and are filled in lazily by
    ``schreier_sims`` and ``_random_pr_init``.
    """
    obj = Basic.__new__(cls, *args, **kw_args)
    obj._generators = args[0]
    # cached/lazily computed properties of the group
    obj._order = None
    obj._center = []
    obj._is_abelian = None
    obj._is_transitive = None
    obj._is_sym = None
    obj._is_alt = None
    obj._is_primitive = None
    obj._is_nilpotent = None
    obj._is_solvable = None
    obj._is_trivial = None
    obj._transitivity_degree = None
    obj._max_div = None
    # degree is taken from the first generator; all others must match
    size = len(args[0][0].array_form)
    obj._r = len(obj._generators)
    if not all(len(args[0][i].array_form)==size for i in xrange(1, len(args[0]))):
        raise ValueError("Permutation group size is not correct")
    obj._degree = size
    # these attributes are assigned after running schreier_sims
    obj._base = []
    obj._coset_repr = []
    obj._coset_repr_n = []
    obj._stabilizers_gens = []
    obj._strong_gens = []
    obj._basic_orbits = []
    obj._transversals = []
    # these attributes are assigned after running _random_pr_init
    obj._random_gens = []
    return obj
def _random_pr_init(self, r, n, _random_prec_n=None):
    r"""
    Initializes random generators for the product replacement algorithm.

    The implementation uses a modification of the original product
    replacement algorithm due to Leedham-Green, as described in [1],
    pp.69-71; also, see [2], pp.27-29 for a detailed theoretical
    analysis of the original product replacement algorithm, and [4].

    The product replacement algorithm is used for producing random,
    uniformly distributed elements of a group `G` with a set of generators
    `S`. For the initialization ``_random_pr_init``, a list `R` of
    `\max\{r, |S|\}` group generators is created as the attribute
    ``G._random_gens``, repeating elements of `S` if necessary, and the
    identity element of `G` is appended to `R` - we shall refer to this
    last element as the accumulator. Then the function ``random_pr()``
    is called ``n`` times, randomizing the list `R` while preserving
    the generation of `G` by `R`. The function ``random_pr()`` itself
    takes two random elements `g, h` among all elements of `R` but
    the accumulator and replaces `g` with a randomly chosen element
    from `\{gh, g(~h), hg, (~h)g\}`. Then the accumulator is multiplied
    by whatever `g` was replaced by. The new value of the accumulator is
    then returned by ``random_pr()``.

    The elements returned will eventually (for ``n`` large enough) become
    uniformly distributed across `G` ([5]). For practical purposes however,
    the values ``n = 50, r = 11`` are suggested in [1].

    Notes
    =====
    XXX THIS FUNCTION HAS SIDE EFFECTS: it changes the attribute
    self._random_gens

    See Also
    ========
    random_pr
    """
    deg = self.degree
    random_gens = self.generators[:]
    k = len(random_gens)
    # pad the list by cycling through the generators until it has length r
    if k < r:
        for i in range(k, r):
            random_gens.append(random_gens[i - k])
    # the accumulator starts out as the identity permutation
    acc = _new_from_array_form(range(deg))
    random_gens.append(acc)
    self._random_gens = random_gens
    # handle randomized input for testing purposes
    # (fixed: compare to None with `is`, not `==`, per PEP 8)
    if _random_prec_n is None:
        for i in range(n):
            self.random_pr()
    else:
        for i in range(n):
            self.random_pr(_random_prec=_random_prec_n[i])
def _union_find_merge(self, first, second, ranks, parents, not_rep):
"""
Merges two classes in a union-find data structure.
Used in the implementation of Atkinson's algorithm as suggested in [1],
pp.83-87. The class merging process uses union by rank as an
optimization. ([7])
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives,
``parents``, the list of class sizes, ``ranks``, and the list of
elements that are not representatives, ``not_rep``, are changed due to
class merging.
See Also
========
minimal_block, _union_find_rep
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
[7] http://www.algorithmist.com/index.php/Union_Find
"""
rep_first = self._union_find_rep(first, parents)
rep_second = self._union_find_rep(second, parents)
if rep_first != rep_second:
# union by rank
if ranks[rep_first] >= ranks[rep_second]:
new_1, new_2 = rep_first, rep_second
else:
new_1, new_2 = rep_second, rep_first
total_rank = ranks[new_1] + ranks[new_2]
if total_rank > self.max_div:
return -1
parents[new_2] = new_1
ranks[new_1] = total_rank
not_rep.append(new_2)
return 1
return 0
def _union_find_rep(self, num, parents):
"""
Find representative of a class in a union-find data structure.
Used in the implementation of Atkinson's algorithm as suggested in [1],
pp.83-87. After the representative of the class to which ``num``
belongs is found, path compression is performed as an optimization
([7]).
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives,
``parents``, is altered due to path compression.
See Also
========
minimal_block, _union_find_merge
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
[7] http://www.algorithmist.com/index.php/Union_Find
"""
rep, parent = num, parents[num]
while parent != rep:
rep = parent
parent = parents[rep]
# path compression
temp, parent = num, parents[num]
while parent != rep:
parents[temp] = rep
temp = parent
parent = parents[temp]
return rep
@property
def base(self):
"""
Return a base from the Schreier-Sims algorithm.
For a permutation group `G`, a base is a sequence of points
`B = (b_1, b_2, ..., b_k)` such that no element of `G` apart from the
identity fixes all the points in `B`. The concepts of a base and
strong generating set and their applications are discussed in depth
in [1],pp.87-89 and [2],pp.55-57.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(4)
>>> S.base
[0, 1, 2]
See Also
========
strong_gens, basic_transversals, basic_orbits, basic_stabilizers
"""
if self._base == []:
self.schreier_sims()
return self._base
def baseswap(self, base, strong_gens, pos, randomized=False,
             transversals=None, basic_orbits=None, strong_gens_distr=None):
    r"""
    Swap two consecutive base points in a base and strong generating set.

    If a base for a group `G` is given by `(b_1, b_2, ..., b_k)`, this
    function returns a base `(b_1, b_2, ..., b_{i+1}, b_i, ..., b_k)`,
    where `i` is given by ``pos``, and a strong generating set relative
    to that base. The original base and strong generating set are not
    modified.  The randomized version (default) is of Las Vegas type.

    Parameters
    ==========
    base, strong_gens
        The base and strong generating set.
    pos
        The position at which swapping is performed.
    randomized
        A switch between randomized and deterministic version.
    transversals
        The transversals for the basic orbits, if known.
    basic_orbits
        The basic orbits, if known.
    strong_gens_distr
        The strong generators distributed by basic stabilizers, if known.

    Returns
    =======
    (base, strong_gens)
        ``base`` is the new base, and ``strong_gens`` is a generating set
        relative to it.

    Examples
    ========
    >>> from sympy.combinatorics.named_groups import SymmetricGroup
    >>> from sympy.combinatorics.testutil import _verify_bsgs
    >>> S = SymmetricGroup(4)
    >>> S.schreier_sims()
    >>> S.baseswap(S.base, S.strong_gens, 1, randomized=False)
    ([0, 2, 1], [Permutation([1, 2, 3, 0]), Permutation([1, 0, 2, 3]), Permutation([0, 1, 3, 2]), Permutation([0, 3, 1, 2]), Permutation([0, 3, 2, 1])])
    >>> S.base
    [0, 1, 2]
    >>> _verify_bsgs(S, S.base, S.strong_gens)
    True

    See Also
    ========
    schreier_sims

    Notes
    =====
    The deterministic version of the algorithm is discussed in
    [1],pp.102-103; the randomized version is discussed in [1],p.103, and
    [2],p.98. It is of Las Vegas type.
    Notice that [1] contains a mistake in the pseudocode and
    discussion of BASESWAP: on line 3 of the pseudocode,
    `|\beta_{i+1}^{\left\langle T\right\rangle}|` should be replaced by
    `|\beta_{i}^{\left\langle T\right\rangle}|`, and the same for the
    discussion of the algorithm.
    """
    # construct the basic orbits, generators for the stabilizer chain
    # and transversal elements from whatever was provided
    transversals, basic_orbits, strong_gens_distr = \
        _handle_precomputed_bsgs(base, strong_gens, transversals,
                                 basic_orbits, strong_gens_distr)
    base_len = len(base)
    degree = self.degree
    stab_pos = PermutationGroup(strong_gens_distr[pos])
    # size of orbit of base[pos] under the stabilizer we seek to insert
    # in the stabilizer chain at position pos + 1
    size = len(basic_orbits[pos])*len(basic_orbits[pos + 1])\
        //len(stab_pos.orbit(base[pos + 1]))
    # initialize the wanted stabilizer by a subgroup
    if pos + 2 > base_len - 1:
        T = []
    else:
        T = strong_gens_distr[pos + 2][:]
    # fixed: the original used an undefined/inconsistent name ``PermGroup``
    # here; ``PermutationGroup`` is what the rest of this file uses
    if T == []:
        current_group = PermutationGroup([_new_from_array_form(range(degree))])
    else:
        current_group = PermutationGroup(T)
    # randomized version
    if randomized is True:
        schreier_vector = stab_pos.schreier_vector(base[pos + 1])
        # add random elements of the stabilizer until they generate it
        while len(current_group.orbit(base[pos])) != size:
            new = stab_pos.random_stab(base[pos + 1],
                                       schreier_vector=schreier_vector)
            T.append(new)
            current_group = PermutationGroup(T)
    # deterministic version
    else:
        Gamma = set(basic_orbits[pos])
        Gamma.remove(base[pos])
        if base[pos + 1] in Gamma:
            Gamma.remove(base[pos + 1])
        # add elements of the stabilizer until they generate it by
        # ruling out member of the basic orbit of base[pos] along the way
        while len(current_group.orbit(base[pos])) != size:
            # fixed: next(iter(...)) instead of Python-2-only iter(...).next()
            gamma = next(iter(Gamma))
            x = transversals[pos][gamma]
            x_inverse = ~x
            temp = x_inverse(base[pos + 1])
            if temp not in basic_orbits[pos + 1]:
                Gamma = Gamma - current_group.orbit(gamma)
            else:
                y = transversals[pos + 1][temp]
                el = x*y
                if el(base[pos]) not in current_group.orbit(base[pos]):
                    T.append(el)
                    current_group = PermutationGroup(T)
                    Gamma = Gamma - current_group.orbit(base[pos])
    # build the new base and strong generating set
    strong_gens_new_distr = strong_gens_distr[:]
    strong_gens_new_distr[pos + 1] = T
    base_new = base[:]
    base_new[pos], base_new[pos + 1] = base_new[pos + 1], base_new[pos]
    strong_gens_new = _strong_gens_from_distr(strong_gens_new_distr)
    for gen in T:
        if gen not in strong_gens_new:
            strong_gens_new.append(gen)
    return base_new, strong_gens_new
@property
def basic_orbits(self):
"""
Return the basic orbits relative to a base and strong generating set.
If `(b_1, b_2, ..., b_k)` is a base for a group `G`, and
`G^{(i)} = G_{b_1, b_2, ..., b_{i-1}}` is the `i`-th basic stabilizer
(so that `G^{(1)} = G`), the `i`-th basic orbit relative to this base
is the orbit of `b_i` under `G^{(i)}`. See [1],pp.87-89 for more
information.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(4)
>>> S.basic_orbits
[[0, 1, 2, 3], [1, 2, 3], [2, 3]]
See Also
========
base, strong_gens, basic_transversals, basic_stabilizers
"""
if self._basic_orbits == []:
self.schreier_sims()
return self._basic_orbits
@property
def basic_stabilizers(self):
    """
    Return a chain of stabilizers relative to a base and strong generating
    set.

    The `i`-th basic stabilizer `G^{(i)}` relative to a base
    `(b_1, b_2, ..., b_k)` is `G_{b_1, b_2, ..., b_{i-1}}`. For more
    information, see [1],pp.87-89.

    Examples
    ========
    >>> from sympy.combinatorics.named_groups import AlternatingGroup
    >>> A = AlternatingGroup(4)
    >>> A.schreier_sims()
    >>> A.base
    [0, 1]
    >>> A.basic_stabilizers
    [PermutationGroup([Permutation([1, 2, 0, 3]), Permutation([0, 2, 3, 1]), Permutation([0, 3, 1, 2])]), PermutationGroup([Permutation([0, 2, 3, 1]), Permutation([0, 3, 1, 2])])]

    See Also
    ========
    base, strong_gens, basic_orbits, basic_transversals
    """
    # make sure a base and strong generating set have been computed
    if not self._coset_repr:
        self.schreier_sims()
    distributed = _distribute_gens_by_base(self._base, self._strong_gens)
    return [PermutationGroup(gens) for gens in distributed]
@property
def basic_transversals(self):
"""
Return basic transversals relative to a base and strong generating set.
The basic transversals are transversals of the basic orbits. They
are provided as a list of dictionaries, each dictionary having
keys - the elements of one of the basic orbits, and values - the
corresponding transversal elements. See [1],pp.87-89 for more
information.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> A = AlternatingGroup(4)
>>> A.basic_transversals
[{0: Permutation([0, 1, 2, 3]), 1: Permutation([1, 2, 0, 3]),\
2: Permutation([2, 0, 1, 3]), 3: Permutation([3, 0, 2, 1])},\
{1: Permutation([0, 1, 2, 3]), 2: Permutation([0, 2, 3, 1]),\
3: Permutation([0, 3, 1, 2])}]
See Also
========
strong_gens, base, basic_orbits, basic_stabilizers
"""
if self._transversals == []:
self.schreier_sims()
return self._transversals
def center(self):
    r"""
    Return the center of a permutation group.

    The center for a group `G` is defined as
    `Z(G) = \{z\in G | \forall g\in G, zg = gz \}`,
    the set of elements of `G` that commute with all elements of `G`.
    It is equal to the centralizer of `G` inside `G`, and is naturally a
    subgroup of `G` ([9]).

    Examples
    ========
    >>> from sympy.combinatorics.perm_groups import PermutationGroup
    >>> from sympy.combinatorics.named_groups import DihedralGroup
    >>> D = DihedralGroup(4)
    >>> G = D.center()
    >>> G.order()
    2

    See Also
    ========
    centralizer

    Notes
    =====
    This is a naive implementation that is a straightforward application
    of ``.centralizer()``
    """
    # Z(G) = C_G(G) by definition
    return self.centralizer(self)
def centralizer(self, other):
    r"""
    Return the centralizer of a group/set/element.

    The centralizer of a set of permutations `S` inside
    a group `G` is the set of elements of `G` that commute with all
    elements of `S`:
    `C_G(S) = \{ g \in G | gs = sg \forall s \in S\}` ([10])
    Usually, `S` is a subset of `G`, but if `G` is a proper subgroup of
    the full symmetric group, we allow for `S` to have elements outside
    `G`.
    It is naturally a subgroup of `G`; the centralizer of a permutation
    group is equal to the centralizer of any set of generators for that
    group, since any element commuting with the generators commutes with
    any product of the generators.

    Parameters
    ==========
    ``other`` - a permutation group/list of permutations/single permutation

    Examples
    ========
    >>> from sympy.combinatorics.named_groups import (SymmetricGroup,
    ... CyclicGroup)
    >>> S = SymmetricGroup(6)
    >>> C = CyclicGroup(6)
    >>> H = S.centralizer(C)
    >>> H == C
    True

    See Also
    ========
    subgroup_search

    Notes
    =====
    The implementation is an application of ``.subgroup_search()`` with
    tests using a specific base for the group `G`.
    """
    if hasattr(other, 'generators'):
        # the centralizer of the trivial group is the whole group, and
        # the trivial group centralizes anything
        if other.is_trivial or self.is_trivial:
            return self
        degree = self.degree
        identity = _new_from_array_form(range(degree))
        # order the orbits of `other` by decreasing size
        orbits = other.orbits()
        num_orbits = len(orbits)
        orbits.sort(key = lambda x: -len(x))
        # build a base that lists each orbit contiguously, and record
        # for every point which orbit it belongs to and each orbit's
        # chosen representative (its first point)
        long_base = []
        orbit_reps = [None]*num_orbits
        orbit_reps_indices = [None]*num_orbits
        orbit_descr = [None]*degree
        for i in range(num_orbits):
            orbit = list(orbits[i])
            orbit_reps[i] = orbit[0]
            orbit_reps_indices[i] = len(long_base)
            for point in orbit:
                orbit_descr[point] = i
            long_base = long_base + orbit
        base, strong_gens = self.schreier_sims_incremental(base=long_base)
        strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
        # truncate the base at the first level whose stabilizer is trivial
        i = 0
        for i in range(len(base)):
            if strong_gens_distr[i] == [identity]:
                break
        # NOTE(review): if no level is trivial, `i` ends at len(base) - 1
        # and the last base point is dropped - presumably the chain always
        # terminates with the trivial group here; verify.
        base = base[:i]
        base_len = i
        # only orbits up to the one containing the last base point matter
        for j in range(num_orbits):
            if base[base_len - 1] in orbits[j]:
                break
        rel_orbits = orbits[: j + 1]
        num_rel_orbits = len(rel_orbits)
        # transversal for each relevant orbit, keyed by orbit point
        transversals = [None]*num_rel_orbits
        for j in range(num_rel_orbits):
            rep = orbit_reps[j]
            transversals[j] = dict(
                other.orbit_transversal(rep, pairs=True))
        trivial_test = lambda x: True
        tests = [None]*base_len
        # per-level tests pruning the search tree: a centralizing element
        # must map each point consistently with the transversal of its
        # orbit representative
        for l in range(base_len):
            if base[l] in orbit_reps:
                tests[l] = trivial_test
            else:
                def test(computed_words, l=l):
                    # l=l binds the loop variable (late-binding closure fix)
                    g = computed_words[l]
                    rep_orb_index = orbit_descr[base[l]]
                    rep = orbit_reps[rep_orb_index]
                    rep_orb = orbits[rep_orb_index]
                    im = g(base[l])
                    im_rep = g(rep)
                    tr_el = transversals[rep_orb_index][base[l]]
                    if im != tr_el(im_rep):
                        return False
                    else:
                        return True
                tests[l] = test
        # final property: g commutes with every generator of `other`
        def prop(g):
            return [g*gen for gen in other.generators] ==\
                [gen*g for gen in other.generators]
        return self.subgroup_search(prop, base=base,
            strong_gens=strong_gens, tests=tests)
    elif hasattr(other, '__getitem__'):
        # a list/tuple of permutations: centralize the group they generate
        gens = list(other)
        return self.centralizer(PermutationGroup(gens))
    elif hasattr(other, 'array_form'):
        # a single permutation
        return self.centralizer(PermutationGroup([other]))
def commutator(self, G, H):
    """
    Return the commutator of two subgroups.

    For a permutation group `K` and subgroups `G`, `H`, the
    commutator of `G` and `H` is defined as the group generated
    by all the commutators `[g, h] = hgh^{-1}g^{-1}` for `g` in `G` and
    `h` in `H`. It is naturally a subgroup of `K` ([1],p.27).

    Examples
    ========
    >>> from sympy.combinatorics.named_groups import (SymmetricGroup,
    ... AlternatingGroup)
    >>> S = SymmetricGroup(5)
    >>> A = AlternatingGroup(5)
    >>> G = S.commutator(S, A)
    >>> G == A
    True

    See Also
    ========
    derived_subgroup

    Notes
    =====
    The commutator of two subgroups `H, G` is equal to the normal closure
    of the commutators of all the generators, i.e. `hgh^{-1}g^{-1}` for `h`
    a generator of `H` and `g` a generator of `G` ([1],p.28)
    """
    # collect the distinct commutators of all generator pairs
    commutators = []
    for g in G.generators:
        for h in H.generators:
            c = h*g*(~h)*(~g)
            if c not in commutators:
                commutators.append(c)
    # the commutator subgroup is their normal closure
    return self.normal_closure(commutators)
def coset_decomposition(self, g):
    """
    Decompose `g` as h_0*...*h_{len(u)}

    The Schreier-Sims coset representation u of `G`
    gives a univoque decomposition of an element `g`
    as h_0*...*h_{len(u)}, where h_i belongs to u[i]

    Output: [h_0, .., h_{len(u)}] if `g` belongs to `G`
    False otherwise

    Examples
    ========
    >>> from sympy.combinatorics.permutations import Permutation
    >>> from sympy.combinatorics.perm_groups import PermutationGroup
    >>> a = Permutation([[0, 1, 3, 7, 6, 4], [2, 5]])
    >>> b = Permutation([[0, 1, 3, 2], [4, 5, 7, 6]])
    >>> G = PermutationGroup([a, b])
    >>> c = Permutation([[0, 1, 2, 3, 4], [5, 6, 7]])
    >>> G.coset_decomposition(c)
    False
    >>> c = Permutation([[0, 6], [1, 7], [2, 4], [3, 5]])
    >>> G.coset_decomposition(c)
    [[6, 4, 2, 0, 7, 5, 3, 1], [0, 4, 1, 5, 2, 6, 3, 7], [0, 1, 2, 3, 4, 5, 6, 7]]
    >>> G.has_element(c)
    True
    """
    u = self.coset_repr()
    if isinstance(g, Permutation):
        g = g.array_form
    g1 = g
    n = len(u)
    a = []
    # peel off one coset representative per level of the chain
    for i in range(n):
        x = g1[i]
        for h in u[i]:
            # find the representative at level i matching g1's image of i
            if h[i] == x:
                a.append(h)
                p2 = perm_af_invert(h)
                g1 = perm_af_mul(p2, g1)
                break
        else:
            # no representative matches: g is not in the group
            return False
    # final consistency check: the product of the factors must rebuild g
    if perm_af_muln(*a) == g:
        return a
    return False
def coset_rank(self, g):
    """
    rank using Schreier-Sims representation

    The coset rank of `g` is the ordering number in which
    it appears in the lexicographic listing according to the
    coset decomposition, see coset_decomposition;
    the ordering is the same as in G.generate(method='coset').
    If `g` does not belong to the group it returns None.

    Examples
    ========
    >>> from sympy.combinatorics.permutations import Permutation
    >>> from sympy.combinatorics.perm_groups import PermutationGroup
    >>> a = Permutation([[0, 1, 3, 7, 6, 4], [2, 5]])
    >>> b = Permutation([[0, 1, 3, 2], [4, 5, 7, 6]])
    >>> G = PermutationGroup([a, b])
    >>> c = Permutation([[0, 1, 2, 3, 4], [5, 6, 7]])
    >>> G.coset_rank(c)
    >>> c = Permutation([[0, 6], [1, 7], [2, 4], [3, 5]])
    >>> G.coset_rank(c)
    40
    >>> G.coset_unrank(40, af=True)
    [6, 7, 4, 5, 2, 3, 0, 1]
    """
    u = self.coset_repr()
    if isinstance(g, Permutation):
        g = g.array_form
    g1 = g
    m = len(u)
    a = []
    un = self._coset_repr_n
    n = self.degree
    rank = 0
    # mixed-radix place values: base[i1] is the product of the coset
    # sizes of all deeper levels of the chain
    base = [1]
    for i in un[m:0:-1]:
        base.append(base[-1]*i)
    base.reverse()
    a1 = [0]*m
    i1 = -1
    # decompose g level by level, accumulating the rank digit j at each
    # base point with its place value
    for i in self._base:
        i1 += 1
        x = g1[i]
        for j, h in enumerate(u[i]):
            if h[i] == x:
                a.append(h)
                a1[i] = j
                rank += j*base[i1]
                p2 = perm_af_invert(h)
                g1 = perm_af_mul(p2, g1)
                break
        else:
            # no representative matches: g is not in the group
            return None
    # final consistency check, as in coset_decomposition
    if perm_af_muln(*a) == g:
        return rank
    return None
def coset_repr(self):
"""
Return the Schreier-Sims representation of the group.
The Schreier-Sims representation is the list of the cosets of
the chain of stabilizers, see schreier_sims.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.coset_repr()
[[[0, 1, 2], [1, 0, 2], [2, 0, 1]], [[0, 1, 2], [0, 2, 1]]]
"""
if not self._coset_repr:
self.schreier_sims()
return self._coset_repr
def coset_unrank(self, rank, af=False):
    """
    unrank using Schreier-Sims representation

    coset_unrank is the inverse operation of coset_rank
    if 0 <= rank < order; otherwise it returns None.

    When ``af`` is True the result is returned in array form instead of
    as a Permutation object.
    """
    u = self.coset_repr()
    if rank < 0 or rank >= self.order():
        return None
    un = self._coset_repr_n
    base = self._base
    m = len(u)
    nb = len(base)
    # internal invariant: one coset-size entry per base point
    assert nb == len(un)
    # extract the mixed-radix digits of `rank`, least significant first
    # (deepest stabilizer level first), storing digit c for base point j
    v = [0]*m
    for i in range(nb-1, -1,-1):
        j = base[i]
        rank, c = divmod(rank, un[i])
        v[j] = c
    # pick the corresponding coset representative at each level and
    # multiply them together
    a = [u[i][v[i]] for i in range(m)]
    h = perm_af_muln(*a)
    if af:
        return h
    else:
        return _new_from_array_form(h)
@property
def degree(self):
"""
Returns the size of the permutations in the group.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1,0])
>>> G = PermutationGroup([a])
>>> G.degree
2
"""
return self._degree
def derived_series(self):
r"""
Return the derived series for the group.
The derived series for a group `G` is defined as
`G = G_0 > G_1 > G_2 > \ldots`
where `G_i = [G_{i-1}, G_{i-1}]`, i.e. `G_i` is the derived subgroup of
`G_{i-1}`, for `i\in\mathbb{N}`. When we have `G_k = G_{k-1}` for some
`k\in\mathbb{N}`, the series terminates.
Returns
=======
A list of permutation groups containing the members of the derived
series in the order `G = G_0, G_1, G_2, \ldots`.
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup, DihedralGroup)
>>> A = AlternatingGroup(5)
>>> len(A.derived_series())
1
>>> S = SymmetricGroup(4)
>>> len(S.derived_series())
4
>>> S.derived_series()[1] == AlternatingGroup(4)
True
>>> S.derived_series()[2] == DihedralGroup(2)
True
See Also
========
derived_subgroup
"""
res = [self]
current = self
next = self.derived_subgroup()
while current != next:
res.append(next)
current = next
next = next.derived_subgroup()
return res
def derived_subgroup(self):
"""
Compute the derived subgroup.
The derived subgroup, or commutator subgroup is the subgroup generated
by all commutators `[g, h] = hgh^{-1}g^{-1}` for `g, h\in G` ; it is
equal to the normal closure of the set of commutators of the generators
([1],p.28, [11]).
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1, 0, 2, 4, 3])
>>> b = Permutation([0, 1, 3, 2, 4])
>>> G = PermutationGroup([a, b])
>>> C = G.derived_subgroup()
>>> list(C.generate(af=True))
[[0, 1, 2, 3, 4], [0, 1, 3, 4, 2], [0, 1, 4, 2, 3]]
See Also
========
derived_series
"""
r = self._r
gens = [p.array_form for p in self.generators]
gens_inv = [perm_af_invert(p) for p in gens]
set_commutators = set()
for i in range(r):
for j in range(r):
p1 = gens[i]
p1inv = gens_inv[i]
p2 = gens[j]
p2inv = gens_inv[j]
c = [p1[p2[p1inv[k]]] for k in p2inv]
ct = tuple(c)
if not ct in set_commutators:
set_commutators.add(ct)
cms = [Permutation(p) for p in set_commutators]
G2 = self.normal_closure(cms)
return G2
def generate(self, method="coset", af=False):
"""
return iterator to generate the elements of the group
Iteration is done with one of these methods:
method='coset' using the Schreier-Sims coset representation
method='dimino' using the Dimino method
If af = True it yields the array form of the permutations
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([0, 2, 3, 1])
>>> g = PermutationGroup([a, b])
>>> list(g.generate(af=True))
[[0, 1, 2, 3], [0, 1, 3, 2], [0, 2, 3, 1], [0, 2, 1, 3], [0, 3, 2, 1], [0, 3, 1, 2]]
"""
if method == "coset":
return self.generate_schreier_sims(af)
elif method == "dimino":
return self.generate_dimino(af)
else:
raise ValueError('there is not this method')
    def generate_dimino(self, af=False):
        """
        Yield group elements using Dimino's algorithm.

        If af == True it yields the array form of the permutations.

        Reference:
        [1] The implementation of various algorithms for Permutation Groups in
        the Computer Algebra System: AXIOM, N.J. Doye, M.Sc. Thesis

        Examples
        ========

        >>> from sympy.combinatorics.permutations import Permutation
        >>> from sympy.combinatorics.perm_groups import PermutationGroup
        >>> a = Permutation([0, 2, 1, 3])
        >>> b = Permutation([0, 2, 3, 1])
        >>> g = PermutationGroup([a, b])
        >>> list(g.generate_dimino(af=True))
        [[0, 1, 2, 3], [0, 2, 1, 3], [0, 2, 3, 1], [0, 1, 3, 2], [0, 3, 2, 1], [0, 3, 1, 2]]
        """
        # NOTE(review): `range(...)` is used as a list and `xrange` appears
        # below, so this method assumes a Python 2 runtime — confirm before
        # porting to Python 3.
        idn = range(self.degree)
        order = 0
        element_list = [idn]
        # tuples of all elements generated so far, for O(1) membership tests
        set_element_list = set([tuple(idn)])
        if af:
            yield idn
        else:
            yield _new_from_array_form(idn)
        gens = [p.array_form for p in self.generators]
        for i in xrange(len(gens)):
            # D elements of the subgroup G_i generated by gens[:i]
            D = element_list[:]
            N = [idn]
            while N:
                A = N
                N = []
                for a in A:
                    for g in gens[:i+1]:
                        ag = perm_af_mul(a, g)
                        if tuple(ag) not in set_element_list:
                            # produce G_i*g
                            for d in D:
                                order += 1
                                ap = perm_af_mul(d, ag)
                                if af:
                                    yield ap
                                else:
                                    p = _new_from_array_form(ap)
                                    yield p
                                element_list.append(ap)
                                set_element_list.add(tuple(ap))
                                N.append(ap)
        # cache the group order as a side effect of full generation
        self._order = len(element_list)
def generate_schreier_sims(self, af=False):
"""
yield group elements using the Schreier-Sims representation
If af = True it yields the array form of the permutations
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([0, 2, 3, 1])
>>> g = PermutationGroup([a, b])
>>> list(g.generate_schreier_sims(af=True))
[[0, 1, 2, 3], [0, 1, 3, 2], [0, 2, 3, 1], [0, 2, 1, 3], [0, 3, 2, 1], [0, 3, 1, 2]]
"""
def get1(posmax):
n = len(posmax) - 1
for i in range(n,-1,-1):
if posmax[i] != 1:
return i + 1
n = self.degree
u = self.coset_repr()
# stg stack of group elements
stg = [range(n)]
# posmax[i] = len(u[i])
posmax = [len(x) for x in u]
n1 = get1(posmax)
pos = [0]*n1
posmax = posmax[:n1]
h = 0
while 1:
# backtrack when finished iterating over coset
if pos[h] >= posmax[h]:
if h == 0:
raise StopIteration
pos[h] = 0
h -= 1
stg.pop()
continue
p = perm_af_mul(stg[-1], u[h][pos[h]])
pos[h] += 1
stg.append(p)
h += 1
if h == n1:
if af:
yield p
else:
p1 = _new_from_array_form(p)
yield p1
stg.pop()
h -= 1
    @property
    def generators(self):
        """
        Returns the generators of the group as Permutation objects.

        Examples
        ========

        >>> from sympy.combinatorics.permutations import Permutation
        >>> from sympy.combinatorics.perm_groups import PermutationGroup
        >>> a = Permutation([0, 2, 1])
        >>> b = Permutation([1, 0, 2])
        >>> G = PermutationGroup([a, b])
        >>> G.generators
        [Permutation([0, 2, 1]), Permutation([1, 0, 2])]
        """
        return self._generators
def has_element(self, g):
"""
test if `g` belongs to G; see coset_decomposition
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([0, 2, 3, 1])
>>> g = PermutationGroup([a, b])
>>> g.has_element(Permutation([0, 1, 3, 2]))
True
>>> g.has_element(Permutation([1, 2, 3, 0]))
False
"""
return bool(self.coset_decomposition(g.array_form))
@property
def is_abelian(self):
"""
Checks if the group is Abelian.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.is_abelian
False
>>> a = Permutation([0, 2, 1])
>>> G = PermutationGroup([a])
>>> G.is_abelian
True
"""
if self._is_abelian is not None:
return self._is_abelian
self._is_abelian = True
gens = [p.array_form for p in self.generators]
for x in gens:
for y in gens:
if y <= x:
continue
if not perm_af_commutes_with(x, y):
self._is_abelian = False
return False
return True
def is_alt_sym(self, eps=0.05, _random_prec=None):
r"""
Monte Carlo test for the symmetric/alternating group for degrees >= 8.
More specifically, it is one-sided Monte Carlo with the
answer True (i.e., G is symmetric/alternating) guaranteed to be
correct, and the answer False being incorrect with probability eps.
Notes
=====
The algorithm itself uses some nontrivial results from group theory and
number theory:
1) If a transitive group `G` of degree ``n`` contains an element
with a cycle of length `n/2 < p < n-2` for `p` a prime, `G` is the
symmetric or alternating group ([1], pp.81-82)
2) The proportion of elements in the symmetric/alternating group having
the property described in 1) is approximately `\log(2)/\log(n)`
([1], p.82; [2], pp.226-227).
The helper function ``_check_cycles_alt_sym`` is used to
go over the cycles in a permutation and look for ones satisfying 1).
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(10)
>>> D.is_alt_sym()
False
See Also
========
_check_cycles_alt_sym
"""
if _random_prec == None:
n = self.degree
if n < 8:
return False
if not self.is_transitive:
return False
if n < 17:
c_n = 0.34
else:
c_n = 0.57
d_n = (c_n*log(2))/log(n)
N_eps = int(-log(eps)/d_n)
for i in range(N_eps):
perm = self.random_pr()
if _check_cycles_alt_sym(perm):
return True
return False
else:
for i in range(_random_prec['N_eps']):
perm = _random_prec[i]
if _check_cycles_alt_sym(perm):
return True
return False
@property
def is_nilpotent(self):
"""
Test if the group is nilpotent.
A group `G` is nilpotent if it has a central series of finite length.
Alternatively, `G` is nilpotent if its lower central series terminates
with the trivial group. Every nilpotent group is also solvable
([1],p.29, [12]).
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... CyclicGroup)
>>> C = CyclicGroup(6)
>>> C.is_nilpotent
True
>>> S = SymmetricGroup(5)
>>> S.is_nilpotent
False
See Also
========
lower_central_series, is_solvable
"""
if self._is_nilpotent is None:
lcs = self.lower_central_series()
terminator = lcs[len(lcs)-1]
gens = terminator.generators
degree = self.degree
identity = _new_from_array_form(range(degree))
if [identity for gen in gens] == gens:
self._is_solvable = True
self._is_nilpotent = True
return True
else:
self._is_nilpotent = False
return False
else:
return self._is_nilpotent
def is_normal(self, gr):
"""
test if G=self is a normal subgroup of gr
G is normal in gr if
for each g2 in G, g1 in gr, g = g1*g2*g1**-1 belongs to G
It is sufficient to check this for each g1 in gr.generator and
g2 g2 in G.generator
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1, 2, 0])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G1 = PermutationGroup([a, Permutation([2, 0, 1])])
>>> G1.is_normal(G)
True
"""
gens2 = [p.array_form for p in self.generators]
gens1 = [p.array_form for p in gr.generators]
for g1 in gens1:
for g2 in gens2:
p = perm_af_muln(g1, g2, perm_af_invert(g1))
if not self.coset_decomposition(p):
return False
return True
def is_primitive(self, randomized=True):
"""
Test a group for primitivity.
A permutation group `G` acting on a set `S` is called primitive if
`S` contains no nontrivial block under the action of `G`
(a block is nontrivial if its cardinality is more than `1`).
Notes
=====
The algorithm is described in [1], p.83, and uses the function
minimal_block to search for blocks of the form `\{0, k\}` for `k`
ranging over representatives for the orbits of `G_0`, the stabilizer of
`0`. This algorithm has complexity `O(n^2)` where `n` is the degree
of the group, and will perform badly if `G_0` is small.
There are two implementations offered: one finds `G_0`
deterministically using the function ``stabilizer``, and the other
(default) produces random elements of `G_0` using ``random_stab``,
hoping that they generate a subgroup of `G_0` with not too many more
orbits than G_0 (this is suggested in [1], p.83). Behavior is changed
by the ``randomized`` flag.
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(10)
>>> D.is_primitive()
False
See Also
========
minimal_block, random_stab
"""
if self._is_primitive != None:
return self._is_primitive
n = self.degree
if randomized:
r = len(self.generators)
random_stab_gens = []
v = self.schreier_vector(0)
for i in range(r):
random_stab_gens.append(self.random_stab(0, v))
stab = PermutationGroup(random_stab_gens)
else:
stab = self.stabilizer(0)
orbits = stab.orbits()
for orb in orbits:
x = orb.pop()
if x != 0 and self.minimal_block([0, x]) != [0]*n:
self._is_primitive = False
return False
self._is_primitive = True
return True
@property
def is_solvable(self):
"""
Test if the group is solvable
`G` is solvable if its derived series terminates with the trivial
group ([1],p.29).
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(3)
>>> S.is_solvable
True
See Also
========
is_nilpotent, derived_series
"""
if self._is_solvable is None:
ds = self.derived_series()
terminator = ds[len(ds) - 1]
gens = terminator.generators
degree = self.degree
identity = _new_from_array_form(range(degree))
if [identity for gen in gens] == gens:
self._is_solvable = True
return True
else:
self._is_solvable = False
return False
else:
return self._is_solvable
def is_subgroup(self, gr):
"""
test if self is a subgroup of gr
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1,2,3,4,0])
>>> b = Permutation([1,0,2,3,4])
>>> G = PermutationGroup([a, b])
>>> c = Permutation([1,0,3,2,4])
>>> G1 = PermutationGroup([a, c])
>>> G1.is_subgroup(G)
True
"""
if self.degree != gr.degree:
return False
if self.order() > gr.order():
return False
gens1 = self.generators
for g in gens1:
if not gr.has_element(g):
return False
return True
@property
def is_transitive(self):
"""
test if the group is transitive
A group is transitive if it has a single orbit.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([2, 0, 1, 3])
>>> G1 = PermutationGroup([a, b])
>>> G1.is_transitive
False
>>> c = Permutation([2, 3, 0, 1])
>>> G2 = PermutationGroup([a, c])
>>> G2.is_transitive
True
"""
if self._is_transitive is not None:
return self._is_transitive
ans = len(self.orbit(0)) == self.degree
self._is_transitive = ans
return ans
@property
def is_trivial(self):
"""
Test if the group is the trivial group.
This is true if and only if all the generators are the identity.
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> id = Permutation(range(5))
>>> G = PermutationGroup([id, id, id])
>>> G.is_trivial
True
"""
if self._is_trivial is None:
gens = self.generators
degree = self.degree
identity = _new_from_array_form(range(degree))
res = [identity for gen in gens] == gens
self._is_trivial = res
return res
else:
return self._is_trivial
def lower_central_series(self):
r"""
Return the lower central series for the group.
The lower central series for a group `G` is the series
`G = G_0 > G_1 > G_2 > \ldots` where
`G_k = [G, G_{k-1}]`, i.e. every term after the first is equal to the
commutator of `G` and the previous term in `G1` ([1],p.29).
Returns
=======
A list of permutation groups in the order
`G = G_0, G_1, G_2, \ldots`
Examples
========
>>> from sympy.combinatorics.named_groups import (AlternatingGroup,
... DihedralGroup)
>>> A = AlternatingGroup(4)
>>> len(A.lower_central_series())
2
>>> A.lower_central_series()[1] == DihedralGroup(2)
True
See Also
========
commutator, derived_series
"""
res = [self]
current = self
next = self.commutator(self, current)
while current != next:
res.append(next)
current = next
next = self.commutator(self, current)
return res
@property
def max_div(self):
"""
Maximum proper divisor of the degree of a permutation group.
Notes
=====
Obviously, this is the degree divided by its minimal proper divisor
(larger than `1`, if one exists). As it is guaranteed to be prime,
the ``sieve`` from ``sympy.ntheory`` is used.
This function is also used as an optimization tool for the functions
``minimal_block`` and ``_union_find_merge``.
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> G = PermutationGroup([Permutation([0,2,1,3])])
>>> G.max_div
2
See Also
========
minimal_block, _union_find_merge
"""
if self._max_div != None:
return self._max_div
n = self.degree
if n == 1:
return 1
for x in sieve:
if n % x == 0:
d = n//x
self._max_div = d
return d
    def minimal_block(self, points):
        r"""
        For a transitive group, finds the block system generated by ``points``.

        If a group `G` acts on a set `S`, a nonempty subset `B` of `S` is
        called a block under the action of `G` if for all `g` in `G` we have
        `gB = B` (`g` fixes `B`) or `gB` and `B` have no common points
        (`g` moves `B` entirely). ([1], p.23; [6]).
        The distinct translates `gB` of a block `B` for `g` in `G` partition
        the set `S` and this set of translates is known as a block system.
        Moreover, we obviously have that all blocks in the partition have
        the same size, hence the block size divides `|S|` ([1], p.23).
        A `G`-congruence is an equivalence relation `~` on the set `S` such that
        `a ~ b` implies `g(a) ~ g(b)` for all `g` in `G`. For a
        transitive group, the equivalence classes of a `G`-congruence and the
        blocks of a block system are the same thing ([1], p.23).
        The algorithm below checks the group for transitivity, and then finds
        the `G`-congruence generated by the pairs `(p_0, p_1), (p_0, p_2), ...,
        (p_0,p_{k-1})` which is the same as finding the maximal block system
        (i.e., the one with minimum block size) such that
        `p_0, ..., p_{k-1}` are in the same block ([1], p.83).
        It is an implementation of Atkinson's algorithm, as suggested in [1],
        and manipulates an equivalence relation on the set `S` using a
        union-find data structure. The running time is just above
        `O(|points||S|)`. ([1], pp.83-87; [7]).

        Examples
        ========

        >>> from sympy.combinatorics.perm_groups import PermutationGroup
        >>> from sympy.combinatorics.named_groups import DihedralGroup
        >>> D = DihedralGroup(10)
        >>> D.minimal_block([0,5])
        [0, 6, 2, 8, 4, 0, 6, 2, 8, 4]
        >>> D.minimal_block([0,1])
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]

        See Also
        ========

        _union_find_rep, _union_find_merge, is_transitive, is_primitive
        """
        # only defined for transitive groups
        if not self.is_transitive:
            return False
        n = self.degree
        gens = self.generators
        # initialize the list of equivalence class representatives
        # NOTE(review): `range(n)` is mutated below, so this assumes the
        # Python 2 list semantics of range — confirm before porting.
        parents = range(n)
        ranks = [1]*n
        not_rep = []
        k = len(points)
        # the block size must divide the degree of the group
        if k > self.max_div:
            return [0]*n
        # seed the union-find: merge all given points into points[0]'s class
        for i in xrange(k-1):
            parents[points[i+1]] = points[0]
            not_rep.append(points[i+1])
        ranks[points[0]] = k
        i = 0
        len_not_rep = k-1
        # propagate the congruence: images of merged points must be merged too
        while i < len_not_rep:
            temp = not_rep[i]
            i += 1
            for gen in gens:
                # find has side effects: performs path compression on the list
                # of representatives
                delta = self._union_find_rep(temp, parents)
                # union has side effects: performs union by rank on the list
                # of representatives
                temp = self._union_find_merge(gen(temp), gen(delta), ranks,\
                       parents, not_rep)
                # -1 signals that a class grew past max_div: only the trivial
                # block system remains
                if temp == -1:
                    return [0]*n
                len_not_rep += temp
        for i in range(n):
            # force path compression to get the final state of the equivalence
            # relation
            self._union_find_rep(i, parents)
        return parents
    def normal_closure(self, other, k=10):
        r"""
        Return the normal closure of a subgroup/set of permutations.

        If `S` is a subset of a group `G`, the normal closure of `A` in `G`
        is defined as the intersection of all normal subgroups of `G` that
        contain `A` ([1],p.14). Alternatively, it is the group generated by
        the conjugates `x^{-1}yx` for `x` a generator of `G` and `y` a
        generator of the subgroup `\left\langle S\right\rangle` generated by
        `S` (for some chosen generating set for `\left\langle S\right\rangle`)
        ([1],p.73).

        Parameters
        ==========

        ``other`` - a subgroup/list of permutations/single permutation
        ``k`` - an implementation-specific parameter that determines the number
        of conjugates that are adjoined to ``other`` at once

        Examples
        ========

        >>> from sympy.combinatorics.named_groups import (SymmetricGroup,
        ... CyclicGroup, AlternatingGroup)
        >>> S = SymmetricGroup(5)
        >>> C = CyclicGroup(5)
        >>> G = S.normal_closure(C)
        >>> G.order()
        60
        >>> G == AlternatingGroup(5)
        True

        See Also
        ========

        commutator, derived_subgroup, random_pr

        Notes
        =====

        The algorithm is described in [1],pp.73-74; it makes use of the
        generation of random elements for permutation groups by the product
        replacement algorithm.
        """
        if hasattr(other, 'generators'):
            degree = self.degree
            identity = _new_from_array_form(range(degree))
            # the closure of the trivial group is itself
            if other.generators == [identity for gen in other.generators]:
                return other
            Z = PermutationGroup(other.generators[:])
            base, strong_gens = Z.schreier_sims_incremental()
            strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
            basic_orbits, basic_transversals =\
            _orbits_transversals_from_bsgs(base, strong_gens_distr)
            C = False
            self._random_pr_init(r=10, n=20)
            # randomized loop: adjoin conjugates that fail membership in Z,
            # then verify closure deterministically before terminating
            while C == False:
                Z._random_pr_init(r=10, n=10)
                for i in range(k):
                    g = self.random_pr()
                    h = Z.random_pr()
                    conj = (~g)*h*g
                    res = _strip(conj, base, basic_orbits, basic_transversals)
                    # membership test: conj sifts to the identity iff it is in Z
                    if res[0] != identity or res[1] != len(base) + 1:
                        gens = Z.generators
                        gens.append(conj)
                        Z = PermutationGroup(gens)
                        strong_gens.append(conj)
                        temp_base, temp_strong_gens =\
                        Z.schreier_sims_incremental(base, strong_gens)
                        base, strong_gens = temp_base, temp_strong_gens
                        strong_gens_distr =\
                        _distribute_gens_by_base(base, strong_gens)
                        basic_orbits, basic_transversals =\
                        _orbits_transversals_from_bsgs(base,\
                        strong_gens_distr)
                C = True
                break_flag = False
                # deterministic check: Z is normal iff every conjugate of its
                # generators by generators of self lies in Z
                for g in self.generators:
                    for h in Z.generators:
                        conj = (~g)*h*g
                        res = _strip(conj, base, basic_orbits,\
                        basic_transversals)
                        if res[0] != identity or res[1] != len(base) + 1:
                            C = False
                            break_flag = True
                            break
                    if break_flag == True:
                        break
            return Z
        elif hasattr(other, '__getitem__'):
            # a list of permutations: wrap it into a group and recurse
            return self.normal_closure(PermutationGroup(other))
        elif hasattr(other, 'array_form'):
            # a single permutation
            return self.normal_closure(PermutationGroup([other]))
def orbit(self, alpha, action='tuples'):
r"""
Compute the orbit of alpha `\{g(\alpha) | g \in G\}` as a set.
The time complexity of the algorithm used here is `O(|Orb|*r)` where
`|Orb|` is the size of the orbit and `r` is the number of generators of
the group. For a more detailed analysis, see [1], p.78, [2], pp.19-21.
Here alpha can be a single point, or a list of points.
If alpha is a single point, the ordinary orbit is computed.
if alpha is a list of points, there are three available options:
'union' - computes the union of the orbits of the points in the list
'tuples' - computes the orbit of the list interpreted as an ordered
tuple under the group action ( i.e., g((1,2,3)) = (g(1), g(2), g(3)) )
'sets' - computes the orbit of the list interpreted as a sets
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> a = Permutation([1,2,0,4,5,6,3])
>>> G = PermutationGroup([a])
>>> G.orbit(0)
set([0, 1, 2])
>>> G.orbit([0,4], 'union')
set([0, 1, 2, 3, 4, 5, 6])
See Also
========
orbit_transversal
"""
if not hasattr(alpha, '__getitem__'):
alpha = [alpha]
if len(alpha) == 1 or action == 'union':
orb = alpha
used = [False]*self.degree
for el in alpha:
used[el] = True
gens = self.generators
for b in orb:
for gen in gens:
temp = gen(b)
if used[temp] == False:
orb.append(temp)
used[temp] = True
return set(orb)
elif action == 'tuples':
alpha = tuple(alpha)
orb = [alpha]
used = set([alpha])
gens = self.generators
for b in orb:
for gen in gens:
temp = tuple([gen(x) for x in b])
if temp not in used:
orb.append(temp)
used.add(temp)
return set(orb)
elif action == 'sets':
alpha = frozenset(alpha)
orb = [alpha]
used = set([alpha])
gens = self.generators
for b in orb:
for gen in gens:
temp = frozenset([gen(x) for x in b])
if temp not in used:
orb.append(temp)
used.add(temp)
return set([tuple(x) for x in orb])
def orbit_rep(self, alpha, beta, schreier_vector=None):
"""
Return a group element which sends ``alpha`` to ``beta``.
If ``beta`` is not in the orbit of ``alpha``, the function returns
``False``. This implementation makes use of the schreier vector.
For a proof of correctness, see [1], p.80
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> G = AlternatingGroup(5)
>>> G.orbit_rep(0,4)
Permutation([4, 2, 3, 0, 1])
See Also
========
schreier_vector
"""
if schreier_vector == None:
schreier_vector = self.schreier_vector(alpha)
if schreier_vector[beta] == None:
return False
n = self.degree
u = _new_from_array_form(range(n))
k = schreier_vector[beta]
gens = self.generators
while k != -1:
u = u*gens[k]
beta = (~gens[k])(beta)
k = schreier_vector[beta]
return u
def orbit_transversal(self, alpha, pairs=False):
r"""
Computes a transversal for the orbit of ``alpha`` as a set.
For a permutation group `G`, a transversal for the orbit
`Orb = \{g(\alpha) | g \in G\}` is a set
`\{g_\beta | g_\beta(\alpha) = \beta\}` for `\beta \in Orb`.
Note that there may be more than one possible transversal.
If ``pairs`` is set to ``True``, it returns the list of pairs
`(\beta, g_\beta)`. For a proof of correctness, see [1], p.79
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> G = DihedralGroup(6)
>>> G.orbit_transversal(0)
[Permutation([0, 1, 2, 3, 4, 5]), Permutation([1, 2, 3, 4, 5, 0]),
Permutation([5, 4, 3, 2, 1, 0]), Permutation([2, 3, 4, 5, 0, 1]),
Permutation([4, 3, 2, 1, 0, 5]), Permutation([3, 4, 5, 0, 1, 2])]
See Also
========
orbit
"""
n = self.degree
tr = [(alpha, _new_from_array_form(range(n)))]
used = [False]*n
used[alpha] = True
gens = self.generators
for pair in tr:
for gen in gens:
temp = gen(pair[0])
if used[temp] == False:
tr.append((temp, gen*pair[1]))
used[temp] = True
if pairs:
return tr
return [pair[1] for pair in tr]
def orbits(self, rep=False):
"""
compute the orbits of G;
if rep=False it returns a list of sets
else it returns a list of representatives of the orbits
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.orbits()
[set([0, 1, 2])]
>>> G.orbits(rep=True)
[0]
"""
n = self._degree
s1 = set(range(n))
orbs = []
while s1:
i = s1.pop()
si = self.orbit(i)
if rep:
orbs.append(i)
else:
orbs.append(si)
s1 -= si
return orbs
def order(self):
"""
return the order of the group
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.order()
6
"""
if self._order != None:
return self._order
if self._is_sym:
n = self.degree
self._order = factorial(n)
return self._order
if self._is_alt:
n = self.degree
self._order = factorial(n)/2
return self._order
self.schreier_sims()
m = 1
for x in self._coset_repr_n:
m *= x
return m
def pointwise_stabilizer(self, points):
r"""
Return the pointwise stabilizer for a set of points.
For a permutation group `G` and a set of points
`\{p_1, p_2,\ldots, p_k\}`, the pointwise stabilizer of
`p_1, p_2, \ldots, p_k` is defined as
`G_{p_1,\ldots, p_k} =
\{g\in G | g(p_i) = p_i \forall i\in\{1, 2,\ldots,k\}\} ([1],p20).
It is a subgroup of `G`.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(7)
>>> Stab = S.pointwise_stabilizer([2, 3, 5])
>>> Stab == S.stabilizer(2).stabilizer(3).stabilizer(5)
True
See Also
========
stabilizer, schreier_sims_incremental
Notes
=====
Rather than the obvious implementation using successive calls to
.stabilizer(), this uses the incremental Schreier-Sims algorithm
to obtain a base with starting segment - the given points.
"""
base, strong_gens = self.schreier_sims_incremental(base=points)
stab_gens = []
degree = self.degree
identity = _new_from_array_form(range(degree))
for gen in strong_gens:
if [gen(point) for point in points] == points:
stab_gens.append(gen)
return PermutationGroup(stab_gens)
def random(self, af=False):
"""
return a random group element
"""
rank = randrange(self.order())
return self.coset_unrank(rank, af)
def random_pr(self, gen_count=11, iterations=50, _random_prec=None):
"""
Return a random group element using product replacement.
For the details of the product replacement algorithm, see
``_random_pr_init`` In ``random_pr`` the actual 'product replacement'
is performed. Notice that if the attribute ``_random_gens``
is empty, it needs to be initialized by ``_random_pr_init``.
See Also
========
_random_pr_init
"""
if self._random_gens == []:
self._random_pr_init(gen_count, iterations)
random_gens = self._random_gens
r = len(random_gens) - 1
# handle randomized input for testing purposes
if _random_prec == None:
s = randrange(r)
t = randrange(r - 1)
if t == s:
t = r - 1
x = choice([1, 2])
e = choice([-1, 1])
else:
s = _random_prec['s']
t = _random_prec['t']
if t == s:
t = r - 1
x = _random_prec['x']
e = _random_prec['e']
if x == 1:
random_gens[s] = random_gens[s]*(random_gens[t]**e)
random_gens[r] = random_gens[r]*random_gens[s]
else:
random_gens[s] = (random_gens[t]**e)*random_gens[s]
random_gens[r] = random_gens[s]*random_gens[r]
return random_gens[r]
def random_stab(self, alpha, schreier_vector=None, _random_prec=None):
"""
Random element from the stabilizer of ``alpha``.
The schreier vector for ``alpha`` is an optional argument used
for speeding up repeated calls. The algorithm is described in [1], p.81
See Also
========
random_pr, orbit_rep
"""
if schreier_vector == None:
schreier_vector = self.schreier_vector(alpha)
if _random_prec == None:
rand = self.random_pr()
else:
rand = _random_prec['rand']
beta = rand(alpha)
h = self.orbit_rep(alpha, beta, schreier_vector)
return (~h)*rand
    def schreier_sims(self):
        """
        Schreier-Sims algorithm.

        It computes the generators of the stabilizers chain
        G > G_{b_1} > .. > G_{b1,..,b_r} > 1
        in which G_{b_1,..,b_i} stabilizes b_1,..,b_i,
        and the corresponding `s` cosets.
        An element of the group can be written univoquely
        as the product h_1*..*h_s.
        We use Jerrum's filter in our implementation of the
        Schreier-Sims algorithm. It runs in polynomial time.
        This implementation is a translation of the C++ implementation in
        http://www.m8j.net

        Examples
        ========

        >>> from sympy.combinatorics.permutations import Permutation
        >>> from sympy.combinatorics.perm_groups import PermutationGroup
        >>> a = Permutation([0, 2, 1])
        >>> b = Permutation([1, 0, 2])
        >>> G = PermutationGroup([a, b])
        >>> G.schreier_sims()
        >>> G.stabilizers_gens()
        [[0, 2, 1]]
        >>> G.coset_repr()
        [[[0, 1, 2], [1, 0, 2], [2, 0, 1]], [[0, 1, 2], [0, 2, 1]]]
        """
        # already computed: the method fills caches and returns nothing
        if self._coset_repr:
            return
        JGr = _JGraph(self)
        alpha = 0
        n = JGr.n
        self._order = 1
        coset_repr = []
        num_generators = []
        generators = []
        gen = range(n)
        base = {}
        JGr.gens += [None]*(n - len(JGr.gens))
        # process base points 0, 1, 2, ... until the stabilizer chain ends
        while 1:
            self._coset_repr_n = 0
            self._coset_repr = [None]*n
            # build the Schreier tree for the orbit of alpha; this fills
            # self._coset_repr / self._coset_repr_n as side effects
            JGr.schreier_tree(alpha, gen)
            cri = []
            for p in self._coset_repr:
                if not p:
                    cri.append(p)
                else:
                    cri.append(perm_af_invert(p))
            # Jerrum's filter keeps the stabilizer generating set small
            JGr.jerrum_filter(alpha, cri)
            if self._coset_repr_n > 1:
                base[alpha] = self._coset_repr_n
            self._order *= self._coset_repr_n
            coset_repr.append([p for p in self._coset_repr if p])
            d = {}
            for p in self._coset_repr:
                if p:
                    d[p[alpha]] = p
            num_generators.append(JGr.r)
            if JGr.r:
                generators.extend(JGr.gens[:JGr.r])
            # no generators left: the chain has reached the trivial group
            if JGr.r <= 0:
                break
            alpha += 1
        self._coset_repr = coset_repr
        # deduplicate the stabilizer generators, preserving order
        a = []
        for p in generators:
            if p not in a:
                a.append(p)
        self._stabilizers_gens = a
        # trim trailing None entries from the generator list
        i = len(JGr.gens) - 1
        while not JGr.gens[i]:
            i -= 1
        JGr.gens = JGr.gens[:i+1]
        # NOTE(review): dict.keys()/.values() are used as lists here, which
        # assumes a Python 2 runtime — confirm before porting.
        self._base = base.keys()
        self._coset_repr_n = base.values()
        # a strong generating set: group generators plus stabilizer generators
        strong_gens = self.generators[:]
        for gen in self._stabilizers_gens:
            gen = Permutation(gen)
            if gen not in strong_gens:
                strong_gens.append(gen)
        self._strong_gens = strong_gens
        # build basic orbits and transversals keyed by base point image
        base_len = len(self._base)
        transversals = [None]*base_len
        basic_orbits = [None]*base_len
        for index in xrange(base_len):
            transversals[index] = {}
            base_point = self._base[index]
            trans = self._coset_repr[base_point][:]
            for el in trans:
                el = Permutation(el)
                orbit_member = el(base_point)
                transversals[index][orbit_member] = el
            basic_orbits[index] =\
            transversals[index].keys()
        self._transversals = transversals
        self._basic_orbits = basic_orbits
def schreier_sims_incremental(self, base=None, gens=None):
    """
    Extend a sequence of points and generating set to a base and strong
    generating set.

    Parameters
    ==========

    base
        The sequence of points to be extended to a base. Optional
        parameter with default value ``[]``.
    gens
        The generating set to be extended to a strong generating set
        relative to the base obtained. Optional parameter with default
        value ``self.generators``.

    Returns
    =======

    (base, strong_gens)
        ``base`` is the base obtained, and ``strong_gens`` is the strong
        generating set relative to it. The original parameters ``base``,
        ``gens`` remain unchanged.

    Examples
    ========

    >>> from sympy.combinatorics.named_groups import AlternatingGroup
    >>> from sympy.combinatorics.perm_groups import PermutationGroup
    >>> from sympy.combinatorics.testutil import _verify_bsgs
    >>> A = AlternatingGroup(7)
    >>> base = [2, 3]
    >>> seq = [2, 3]
    >>> base, strong_gens = A.schreier_sims_incremental(base=seq)
    >>> _verify_bsgs(A, base, strong_gens)
    True
    >>> base[:2]
    [2, 3]

    Notes
    =====

    This version of the Schreier-Sims algorithm runs in polynomial time.
    There are certain assumptions in the implementation - if the trivial
    group is provided, ``base`` and ``gens`` are returned immediately,
    as any sequence of points is a base for the trivial group. If the
    identity is present in the generators ``gens``, it is removed as
    it is a redundant generator.
    The implementation is described in [1], pp.90-93.

    See Also
    ========

    schreier_sims, schreier_sims_random
    """
    if base is None:
        base = []
    if gens is None:
        gens = self.generators[:]
    base_len = len(base)
    degree = self.degree
    identity = _new_from_array_form(range(degree))
    # handle the trivial group
    if gens == [identity]:
        return base, gens
    # prevent side effects: work on copies of the caller's lists
    _base, _gens = base[:], gens[:]
    # remove the identity as a generator
    _gens = [x for x in _gens if x != identity]
    # make sure no generator fixes all base points
    for gen in _gens:
        if [gen(x) for x in _base] == [x for x in _base]:
            # gen fixes the whole base: extend the base with the first
            # point gen moves, so gen lies in some basic stabilizer
            new = 0
            while gen(new) == new:
                new += 1
            _base.append(new)
            base_len += 1
    # distribute generators according to basic stabilizers
    strong_gens_distr = _distribute_gens_by_base(_base, _gens)
    # initialize the basic stabilizers, basic orbits and basic transversals
    stabs = {}
    orbs = {}
    transversals = {}
    for i in xrange(base_len):
        stabs[i] = PermutationGroup(strong_gens_distr[i])
        transversals[i] = dict(stabs[i].orbit_transversal(_base[i],
                               pairs=True))
        orbs[i] = transversals[i].keys()
    # main loop: amend the stabilizer chain until we have generators
    # for all stabilizers
    i = base_len - 1
    while i >= 0:
        # this flag is used to continue with the main loop from inside
        # a nested loop
        continue_i = False
        # test the generators for being a strong generating set
        for beta in orbs[i]:
            u_beta = transversals[i][beta]
            for gen in strong_gens_distr[i]:
                u_beta_gen = transversals[i][gen(beta)]
                if gen*u_beta != u_beta_gen:
                    # test if the schreier generator is in the i+1-th
                    # would-be basic stabilizer
                    y = True
                    schreier_gen = (~u_beta_gen)*gen*u_beta
                    h, j = _strip(schreier_gen, _base, orbs, transversals)
                    if j <= base_len:
                        # new strong generator h at level j
                        y = False
                    elif h != _new_from_array_form(range(degree)):
                        # h fixes all base points but is not the identity:
                        # extend the base with the first point h moves
                        y = False
                        moved = 0
                        while h(moved) == moved:
                            moved += 1
                        _base.append(moved)
                        base_len += 1
                        strong_gens_distr.append([])
                    if y == False:
                        # if a new strong generator is found, update the
                        # data structures for levels i+1 .. j-1 and start
                        # over from level j-1
                        for l in range(i + 1, j):
                            strong_gens_distr[l].append(h)
                            stabs[l] = PermutationGroup(strong_gens_distr[l])
                            transversals[l] = dict(stabs[l].orbit_transversal(_base[l],
                                                   pairs=True))
                            orbs[l] = transversals[l].keys()
                        i = j - 1
                        # continue main loop using the flag
                        continue_i = True
                if continue_i == True:
                    break
            if continue_i == True:
                break
        if continue_i == True:
            continue
        i -= 1
    # build the strong generating set (deduplicated, level by level)
    strong_gens = []
    for gens in strong_gens_distr:
        for gen in gens:
            if gen not in strong_gens:
                strong_gens.append(gen)
    return _base, strong_gens
def schreier_sims_random(self, base=None, gens=None, consec_succ=10,
                         _random_prec=None):
    r"""
    Randomized Schreier-Sims algorithm.

    The randomized Schreier-Sims algorithm takes the sequence ``base``
    and the generating set ``gens``, and extends ``base`` to a base, and
    ``gens`` to a strong generating set relative to that base with
    probability of a wrong answer at most `1/\text{consec\_succ}`.

    Parameters
    ==========

    base
        The sequence to be extended to a base.
    gens
        The generating set to be extended to a strong generating set.
    consec_succ
        The parameter defining the probability of a wrong answer.
    _random_prec
        An internal parameter used for testing purposes.

    Returns
    =======

    (base, strong_gens)
        ``base`` is the base and ``strong_gens`` is the strong generating
        set relative to it.

    Examples
    ========

    >>> from sympy.combinatorics.perm_groups import PermutationGroup
    >>> from sympy.combinatorics.testutil import _verify_bsgs
    >>> from sympy.combinatorics.named_groups import SymmetricGroup
    >>> S = SymmetricGroup(5)
    >>> base, strong_gens = S.schreier_sims_random(consec_succ=5)
    >>> _verify_bsgs(S, base, strong_gens) #doctest: +SKIP
    True

    Notes
    =====

    The algorithm is described in detail in [1],pp.97-98. It extends
    the orbits ``orbs`` and the permutation groups ``stabs`` to
    basic orbits and basic stabilizers for the base and strong generating
    set produced in the end.
    The idea of the extension process
    is to "sift" random group elements through the stabilizer chain
    and amend the stabilizers/orbits along the way when a sift
    is not successful.
    The helper function ``_strip`` is used to attempt
    to decompose a random group element according to the current
    state of the stabilizer chain and report whether the element was
    fully decomposed (successful sift) or not (unsuccessful sift). In
    the latter case, the level at which the sift failed is reported and
    used to amend ``stabs``, ``base``, ``gens`` and ``orbs`` accordingly.
    The halting condition is for ``consec_succ`` consecutive successful
    sifts to pass. This makes sure that the current ``base`` and ``gens``
    form a BSGS with probability at least `1 - 1/\text{consec\_succ}`.

    NOTE(review): unlike ``schreier_sims_incremental``, this method
    appends to the ``base`` list passed in by the caller (no defensive
    copy is made).

    See Also
    ========

    schreier_sims
    """
    if base is None:
        base = []
    if gens is None:
        gens = self.generators
    base_len = len(base)
    n = self.degree
    # make sure no generator fixes all base points
    for gen in gens:
        if [gen(x) for x in base] == [x for x in base]:
            # gen fixes the whole base: append the first point it moves
            new = 0
            while gen(new) == new:
                new += 1
            base.append(new)
            base_len += 1
    # distribute generators according to basic stabilizers
    strong_gens_distr = _distribute_gens_by_base(base, gens)
    # initialize the basic stabilizers, basic transversals and basic orbits
    stabs = {}
    transversals = {}
    orbs = {}
    for i in xrange(base_len):
        stabs[i] = PermutationGroup(strong_gens_distr[i])
        transversals[i] = dict(stabs[i].orbit_transversal(base[i],
                               pairs=True))
        orbs[i] = transversals[i].keys()
    # initialize the number of consecutive elements sifted
    c = 0
    # start sifting random elements while the number of consecutive sifts
    # is less than consec_succ
    while c < consec_succ:
        if _random_prec is None:
            g = self.random_pr()
        else:
            # deterministic replay of "random" elements for the test suite
            g = _random_prec['g'].pop()
        h, j = _strip(g, base, orbs, transversals)
        y = True
        # determine whether a new base point is needed
        if j <= base_len:
            y = False
        elif not h.is_Identity:
            # the sift residue fixes all base points but is not the
            # identity: extend the base with the first point it moves
            y = False
            moved = 0
            while h(moved) == moved:
                moved += 1
            base.append(moved)
            base_len += 1
            strong_gens_distr.append([])
        # if the element doesn't sift, amend the strong generators and
        # associated stabilizers and orbits
        if y == False:
            for l in range(1, j):
                strong_gens_distr[l].append(h)
                stabs[l] = PermutationGroup(strong_gens_distr[l])
                transversals[l] = dict(stabs[l].orbit_transversal(base[l],
                                       pairs=True))
                orbs[l] = transversals[l].keys()
            # an unsuccessful sift resets the success counter
            c = 0
        else:
            c += 1
    # build the strong generating set; new generators are always inserted
    # starting at level 1 and deeper levels hold subsets of level 1, so
    # merging levels 0 and 1 presumably covers the whole chain -- TODO confirm
    strong_gens = strong_gens_distr[0][:]
    for gen in strong_gens_distr[1]:
        if gen not in strong_gens:
            strong_gens.append(gen)
    return base, strong_gens
def schreier_vector(self, alpha):
    """
    Compute the Schreier vector for ``alpha``.

    The Schreier vector compactly encodes the orbit of ``alpha``: entry
    ``alpha`` is ``-1``, an entry of ``None`` marks a point outside the
    orbit, and any other entry is the index of the generator whose
    application first reached that point during the orbit traversal.
    The vector therefore depends on the order in which the group
    generators are listed. For a definition, see [3]; for the algorithm
    and its correctness, see [2], pp.78-80.

    Examples
    ========

    >>> from sympy.combinatorics.perm_groups import PermutationGroup
    >>> from sympy.combinatorics.permutations import Permutation
    >>> a = Permutation([2,4,6,3,1,5,0])
    >>> b = Permutation([0,1,3,5,4,6,2])
    >>> G = PermutationGroup([a,b])
    >>> G.schreier_vector(0)
    [-1, None, 0, 1, None, 1, 0]

    See Also
    ========

    orbit
    """
    degree = self.degree
    vector = [None]*degree
    vector[alpha] = -1
    # breadth-first traversal of the orbit; the orbit list grows while
    # we scan it, which drives the search forward
    orbit = [alpha]
    seen = [False]*degree
    seen[alpha] = True
    for point in orbit:
        for gen_index, gen in enumerate(self.generators):
            image = gen(point)
            if not seen[image]:
                seen[image] = True
                vector[image] = gen_index
                orbit.append(image)
    return vector
def stabilizer(self, alpha):
    r"""
    Return the stabilizer subgroup of ``alpha``.

    The stabilizer of `\alpha` is the group `G_\alpha =
    \{g \in G | g(\alpha) = \alpha\}`. It is generated by the Schreier
    generators collected while traversing the orbit of ``alpha``.
    For a proof of correctness, see [1], p.79.

    Examples
    ========

    >>> from sympy.combinatorics.perm_groups import PermutationGroup
    >>> from sympy.combinatorics.named_groups import DihedralGroup
    >>> G = DihedralGroup(6)
    >>> G.stabilizer(5)
    PermutationGroup([Permutation([4, 3, 2, 1, 0, 5]),
    Permutation([0, 1, 2, 3, 4, 5])])

    See Also
    ========

    orbit
    """
    degree = self.degree
    # transversal: for each orbit point, a permutation taking alpha there
    transversal = {alpha: _new_from_array_form(range(degree))}
    orbit = [alpha]
    in_orbit = [False]*degree
    in_orbit[alpha] = True
    schreier_gens = []
    # breadth-first orbit traversal; the orbit list grows as we scan it
    for point in orbit:
        for gen in self.generators:
            image = gen(point)
            if in_orbit[image]:
                # already-seen image yields a Schreier generator, which
                # fixes alpha by construction
                new_gen = (~transversal[image])*gen*transversal[point]
                if new_gen not in schreier_gens:
                    schreier_gens.append(new_gen)
            else:
                in_orbit[image] = True
                orbit.append(image)
                transversal[image] = gen*transversal[point]
    return PermutationGroup(list(schreier_gens))
def stabilizers_gens(self):
    """
    Schreier-Sims stabilizers generators.

    Return the generators of the stabilizers chain in the Schreier-Sims
    representation, running ``schreier_sims`` first if its results have
    not been cached yet.

    Examples
    ========

    >>> from sympy.combinatorics.permutations import Permutation
    >>> from sympy.combinatorics.perm_groups import PermutationGroup
    >>> a = Permutation([0, 2, 1])
    >>> b = Permutation([1, 0, 2])
    >>> G = PermutationGroup([a, b])
    >>> G.stabilizers_gens()
    [[0, 2, 1]]
    """
    # a cached coset representation means schreier_sims already ran
    if self._coset_repr:
        return self._stabilizers_gens
    self.schreier_sims()
    return self._stabilizers_gens
@property
def strong_gens(self):
    r"""
    Return a strong generating set from the Schreier-Sims algorithm.

    A generating set `S = \{g_1, g_2, ..., g_t\}` for a permutation group
    `G` is a strong generating set relative to the sequence of points
    (referred to as a "base") `(b_1, b_2, ..., b_k)` if, for
    `1 \leq i \leq k` we have that the intersection of the pointwise
    stabilizer `G^{(i+1)} := G_{b_1, b_2, ..., b_i}` with `S` generates
    the pointwise stabilizer `G^{(i+1)}`. The concepts of a base and
    strong generating set and their applications are discussed in depth
    in [1],pp.87-89 and [2],pp.55-57.

    Examples
    ========

    >>> from sympy.combinatorics.named_groups import DihedralGroup
    >>> D = DihedralGroup(4)
    >>> D.strong_gens
    [Permutation([1, 2, 3, 0]), Permutation([3, 2, 1, 0]),\
    Permutation([0, 3, 2, 1])]
    >>> D.base
    [0, 1]

    See Also
    ========

    base, basic_transversals, basic_orbits, basic_stabilizers
    """
    # an empty list is the "not yet computed" marker set by __init__
    if self._strong_gens != []:
        return self._strong_gens
    self.schreier_sims()
    return self._strong_gens
def subgroup_search(self, prop, base=None, strong_gens=None, tests=None,
                    init_subgroup=None):
    """
    Find the subgroup of all elements satisfying the property ``prop``.

    This is done by a depth-first search with respect to base images that
    uses several tests to prune the search tree.

    Parameters
    ==========

    prop
        The property to be used. Has to be callable on group elements
        and always return ``True`` or ``False``. It is assumed that
        all group elements satisfying ``prop`` indeed form a subgroup.
    base
        A base for the supergroup.
    strong_gens
        A strong generating set for the supergroup.
    tests
        A list of callables of length equal to the length of ``base``.
        These are used to rule out group elements by partial base images, so
        that ``tests[l](g)`` returns False if the element ``g`` is known not
        to satisfy prop based on where g sends the first ``l + 1`` base points.
    init_subgroup
        If a subgroup of the sought group is known in advance, it can be
        passed to the function as this parameter.

    Returns
    =======

    res
        The subgroup of all elements satisfying ``prop``. The generating set
        for this group is guaranteed to be a strong generating set relative to
        the base ``base``.

    Examples
    ========

    >>> from sympy.combinatorics.named_groups import (SymmetricGroup,
    ... AlternatingGroup)
    >>> from sympy.combinatorics.perm_groups import PermutationGroup
    >>> from sympy.combinatorics.testutil import _verify_bsgs
    >>> S = SymmetricGroup(7)
    >>> prop_even = lambda x: x.is_even
    >>> base, strong_gens = S.schreier_sims_incremental()
    >>> G = S.subgroup_search(prop_even, base=base, strong_gens=strong_gens)
    >>> G == AlternatingGroup(7)
    True
    >>> _verify_bsgs(G, base, G.generators)
    True

    Notes
    =====

    This function is extremely lengthy and complicated and will require
    some careful attention. The implementation is described in
    [1],pp.114-117, and the comments for the code here follow the lines
    of the pseudocode in the book for clarity.
    The complexity is exponential in general, since the search process by
    itself visits all members of the supergroup. However, there are a lot
    of tests which are used to prune the search tree, and users can define
    their own tests via the ``tests`` parameter, so in practice, and for
    some computations, it's not terrible.
    A crucial part in the procedure is the frequent base change performed
    (this is line 11 in the pseudocode) in order to obtain a new basic
    stabilizer. The book mentions that this can be done by using
    ``.baseswap(...)``, however the current implementation uses a more
    straightforward way to find the next basic stabilizer - calling the
    function ``.stabilizer(...)`` on the previous basic stabilizer.
    """
    # initialize BSGS and basic group properties
    if base is None:
        base, strong_gens = self.schreier_sims_incremental()
    base_len = len(base)
    degree = self.degree
    identity = _new_from_array_form(range(degree))
    base_ordering = _base_ordering(base, degree)
    # add an element larger than all points
    base_ordering.append(degree)
    # add an element smaller than all points
    base_ordering.append(-1)
    # compute BSGS-related structures
    strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
    basic_orbits, transversals = _orbits_transversals_from_bsgs(base,
                                 strong_gens_distr)
    # handle subgroup initialization and tests
    if init_subgroup is None:
        init_subgroup = PermutationGroup([identity])
    if tests is None:
        trivial_test = lambda x: True
        tests = []
        for i in xrange(base_len):
            tests.append(trivial_test)
    # line 1: more initializations.
    res = init_subgroup
    f = base_len - 1
    l = base_len - 1
    # line 2: set the base for K to the base for G
    res_base = base[:]
    # line 3: compute BSGS and related structures for K
    res_base, res_strong_gens = res.schreier_sims_incremental(base=res_base)
    res_strong_gens_distr = _distribute_gens_by_base(res_base,
                            res_strong_gens)
    res_basic_orbits_init_base = \
        [PermutationGroup(res_strong_gens_distr[i]).orbit(res_base[i])
         for i in range(base_len)]
    # initialize orbit representatives
    orbit_reps = [None]*base_len
    # line 4: orbit representatives for f-th basic stabilizer of K
    stab_f = PermutationGroup(res_strong_gens_distr[f])
    orbits = stab_f.orbits()
    reps = []
    for orbit in orbits:
        # get the minimal element in the base ordering
        rep = min(orbit, key=lambda point: base_ordering[point])
        reps.append(rep)
    orbit_reps[f] = reps
    # line 5: remove the base point from the representatives to avoid
    # getting the identity element as a generator for K
    orbit_reps[f].remove(base[f])
    # line 6: more initializations
    c = [0]*base_len
    u = [identity]*base_len
    sorted_orbits = [None]*base_len
    for i in range(base_len):
        sorted_orbits[i] = basic_orbits[i][:]
        sorted_orbits[i].sort(key=lambda point: base_ordering[point])
    # line 7: initializations
    mu = [None]*base_len
    nu = [None]*base_len
    # this corresponds to the element smaller than all points
    mu[l] = degree + 1
    temp_index = len(basic_orbits[l]) + 1 - len(res_basic_orbits_init_base[l])
    if temp_index >= len(basic_orbits[l]):
        # this corresponds to the element larger than all points
        nu[l] = base_ordering[degree]
    else:
        nu[l] = sorted_orbits[l][temp_index]
    # initialize computed words
    computed_words = [identity]*base_len
    # line 8: main loop
    while True:
        # apply all the tests
        while l < base_len - 1 and \
              computed_words[l](base[l]) in orbit_reps[l] and \
              base_ordering[computed_words[l](base[l])] > \
              base_ordering[mu[l]] and \
              base_ordering[computed_words[l](base[l])] < \
              base_ordering[nu[l]] and \
              tests[l](computed_words):
            # line 11: change the (partial) base of K
            new_point = computed_words[l](base[l])
            res_base[l] = new_point
            temp_group = PermutationGroup(res_strong_gens_distr[l])
            new_stab = temp_group.stabilizer(new_point)
            res_strong_gens_distr[l + 1] = new_stab.generators
            # line 12: calculate minimal orbit representatives for the
            # l+1-th basic stabilizer
            orbits = new_stab.orbits()
            reps = []
            for orbit in orbits:
                rep = min(orbit, key=lambda point: base_ordering[point])
                reps.append(rep)
            orbit_reps[l + 1] = reps
            # line 13: amend sorted orbits
            l += 1
            temp_orbit = [computed_words[l - 1](point) for point
                          in basic_orbits[l]]
            temp_orbit.sort(key=lambda point: base_ordering[point])
            sorted_orbits[l] = temp_orbit
            # lines 14 and 15: update variables used in minimality tests
            new_mu = degree + 1
            for i in range(l):
                if base[l] in res_basic_orbits_init_base[i]:
                    candidate = computed_words[i](base[i])
                    if base_ordering[candidate] > base_ordering[new_mu]:
                        new_mu = candidate
            mu[l] = new_mu
            temp_index = len(basic_orbits[l]) + 1 - \
                         len(res_basic_orbits_init_base[l])
            if temp_index >= len(sorted_orbits[l]):
                nu[l] = base_ordering[degree]
            else:
                nu[l] = sorted_orbits[l][temp_index]
            # line 16: determine the new transversal element
            c[l] = 0
            temp_point = sorted_orbits[l][c[l]]
            temp_element = ~(computed_words[l - 1])
            gamma = temp_element(temp_point)
            u[l] = transversals[l][gamma]
            # update computed words
            computed_words[l] = computed_words[l - 1]*u[l]
        # lines 17 & 18: apply the tests to the group element found
        g = computed_words[l]
        temp_point = g(base[l])
        if l == base_len - 1 and \
           base_ordering[temp_point] > base_ordering[mu[l]] and \
           base_ordering[temp_point] < base_ordering[nu[l]] and \
           temp_point in orbit_reps[l] and \
           tests[l](computed_words) and \
           prop(g):
            # line 19: reset the base of K
            gens = res.generators[:]
            gens.append(g)
            res = PermutationGroup(gens)
            res_base = base[:]
            # line 20: recalculate basic orbits (and transversals)
            res_strong_gens.append(g)
            res_strong_gens_distr = _distribute_gens_by_base(res_base,
                                    res_strong_gens)
            res_basic_orbits_init_base = \
                [PermutationGroup(res_strong_gens_distr[i]).orbit(res_base[i])
                 for i in range(base_len)]
            # line 21: recalculate orbit representatives
            # BUG FIX: the freshly computed orbits of the new stab_f are
            # iterated here; previously the result was stored in a
            # throwaway `temp_orbits` while the loop read the stale
            # `orbits` variable from an earlier step.
            stab_f = PermutationGroup(res_strong_gens_distr[f])
            orbits = stab_f.orbits()
            reps = []
            for orbit in orbits:
                rep = min(orbit, key=lambda point: base_ordering[point])
                reps.append(rep)
            orbit_reps[f] = reps
            # line 22: reset the search depth
            l = f
        # line 23: go up the tree until in the first branch not fully
        # searched
        while l >= 0 and c[l] == len(basic_orbits[l]) - 1:
            l = l - 1
        # line 24: if the entire tree is traversed, return K
        if l == -1:
            return res
        # lines 25-27: update orbit representatives
        if l < f:
            # line 26
            f = l
            c[l] = 0
            # line 27
            # BUG FIX: iterate the freshly computed orbits (same stale
            # `orbits` problem as at line 21 above).
            stab_f = PermutationGroup(res_strong_gens_distr[f])
            orbits = stab_f.orbits()
            reps = []
            for orbit in orbits:
                rep = min(orbit, key=lambda point: base_ordering[point])
                reps.append(rep)
            orbit_reps[f] = reps
        # line 28: update variables used for minimality testing
        mu[l] = degree + 1
        temp_index = len(basic_orbits[l]) + 1 - \
                     len(res_basic_orbits_init_base[l])
        if temp_index >= len(sorted_orbits[l]):
            nu[l] = base_ordering[degree]
        else:
            nu[l] = sorted_orbits[l][temp_index]
        # line 29: set the next element from the current branch and update
        # accordingly
        c[l] += 1
        if l == 0:
            element = identity
        else:
            element = ~(computed_words[l - 1])
        gamma = element(sorted_orbits[l][c[l]])
        u[l] = transversals[l][gamma]
        if l == 0:
            computed_words[l] = u[l]
        else:
            computed_words[l] = computed_words[l - 1]*u[l]
@property
def transitivity_degree(self):
    r"""
    Compute the degree of transitivity of the group.

    A permutation group `G` acting on `\Omega = \{0, 2, ..., n-1\}` is
    `k`-fold transitive, if, for any k points
    `(a_1, a_2, ..., a_k)\in\Omega` and any k points
    `(b_1, b_2, ..., b_k)\in\Omega` there exists `g\in G` such that
    `g(a_1)=b_1, g(a_2)=b_2, ..., g(a_k)=b_k`
    The degree of transitivity of `G` is the maximum `k` such that
    `G` is `k`-fold transitive. ([8])

    The result is cached in ``self._transitivity_degree`` on first
    computation.

    Examples
    ========

    >>> from sympy.combinatorics.perm_groups import PermutationGroup
    >>> from sympy.combinatorics.permutations import Permutation
    >>> a = Permutation([1, 2, 0])
    >>> b = Permutation([1, 0, 2])
    >>> G = PermutationGroup([a,b])
    >>> G.transitivity_degree
    3

    See Also
    ========

    is_transitive, orbit
    """
    if self._transitivity_degree is None:
        n = self.degree
        # max_size accumulates n*(n-1)*...*(n-i+1), the number of
        # distinct i-tuples; G is i-transitive iff its orbit on
        # i-tuples has exactly that size
        max_size = 1
        for i in range(1, n + 1):
            max_size *= n - i + 1
            orb = self.orbit(range(i), 'tuples')
            if len(orb) != max_size:
                # bug fix: cache the result before returning; previously
                # the early exit skipped the cache, so every access
                # recomputed the (expensive) tuple orbits
                self._transitivity_degree = i - 1
                return i - 1
        self._transitivity_degree = n
        return n
    else:
        return self._transitivity_degree
# Backwards-compatible short alias for PermutationGroup.
PermGroup = PermutationGroup
|
{"hexsha": "825ac29f5bb69200474bb3b9fd3cd72d174a5c3e", "size": 115660, "ext": "py", "lang": "Python", "max_stars_repo_path": "sympy/combinatorics/perm_groups.py", "max_stars_repo_name": "sn6uv/sympy", "max_stars_repo_head_hexsha": "5b149c2f72847e4785c65358b09d99b29f101dd5", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sympy/combinatorics/perm_groups.py", "max_issues_repo_name": "sn6uv/sympy", "max_issues_repo_head_hexsha": "5b149c2f72847e4785c65358b09d99b29f101dd5", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sympy/combinatorics/perm_groups.py", "max_forks_repo_name": "sn6uv/sympy", "max_forks_repo_head_hexsha": "5b149c2f72847e4785c65358b09d99b29f101dd5", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0378673129, "max_line_length": 183, "alphanum_fraction": 0.5295607816, "include": true, "reason": "from sympy", "num_tokens": 28929}
|
import torch
import numpy as np
def compute_gradient(x):
    """Compute spatial gradients of a 2-channel deformation field.

    x: tensor of shape (batch, 2, height, width) holding the x- and
    y-displacement channels [u, v].
    Returns a tensor of shape (batch, 4, height, width) with channels
    ordered [du/dx, du/dy, dv/dx, dv/dy].

    Gradients are central differences, du/dx = (u(x+1) - u(x-1)) / 2,
    with zero padding at the borders.

    Bug fix: helper tensors were hard-coded to ``.cuda()``, which crashed
    on CPU tensors and used the wrong device in multi-GPU settings; they
    are now allocated on ``x.device`` (backward compatible for CUDA input).
    """
    bsize, csize, height, width = x.size()
    device = x.device
    # pad one zero column on each side, then take the shifted difference
    pad_w = torch.zeros(bsize, csize, height, 1, device=device)
    xw = torch.cat((pad_w, x, pad_w), 3)
    d_x = (torch.index_select(xw, 3, torch.arange(2, width + 2, device=device))
           - torch.index_select(xw, 3, torch.arange(width, device=device))) / 2  # [du/dx, dv/dx]
    # same along the height dimension
    pad_h = torch.zeros(bsize, csize, 1, width, device=device)
    xh = torch.cat((pad_h, x, pad_h), 2)
    d_y = (torch.index_select(xh, 2, torch.arange(2, height + 2, device=device))
           - torch.index_select(xh, 2, torch.arange(height, device=device))) / 2  # [du/dy, dv/dy]
    d_xy = torch.cat((d_x, d_y), 1)
    # reorder channels from [du/dx, dv/dx, du/dy, dv/dy]
    # to [du/dx, du/dy, dv/dx, dv/dy]
    d_xy = torch.index_select(d_xy, 1, torch.tensor([0, 2, 1, 3], device=device))
    return d_xy
def centre_crop(img, size, centre):
    """Crop a (C, H, W) array to a (C, size, size) window centred on
    ``centre`` = (row, col), zero-padding wherever the window extends
    past the image border.
    """
    half = size // 2
    cy, cx = centre[0], centre[1]
    # extents actually available on each side of the centre
    top = min(half, cy)
    bottom = min(half, img.shape[1] - cy)
    left = min(half, cx)
    right = min(half, img.shape[2] - cx)
    out = np.zeros((img.shape[0], size, size))
    out[:, half - top:half + bottom, half - left:half + right] = \
        img[:, cy - top:cy + bottom, cx - left:cx + right]
    return out
|
{"hexsha": "4be183fdd9bfd705cddfdcba77aa521dad1c5ba1", "size": 1361, "ext": "py", "lang": "Python", "max_stars_repo_path": "util.py", "max_stars_repo_name": "cq615/Biomechanics-informed-motion-tracking", "max_stars_repo_head_hexsha": "e01419b31d67a87fc410292f253f05afe7935f7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-08-25T13:09:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T05:24:27.000Z", "max_issues_repo_path": "util.py", "max_issues_repo_name": "shuowang26/Biomechanics-informed-motion-tracking", "max_issues_repo_head_hexsha": "e01419b31d67a87fc410292f253f05afe7935f7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-02T14:52:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-25T03:54:54.000Z", "max_forks_repo_path": "util.py", "max_forks_repo_name": "shuowang26/Biomechanics-informed-motion-tracking", "max_forks_repo_head_hexsha": "e01419b31d67a87fc410292f253f05afe7935f7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-08-25T13:09:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T04:00:44.000Z", "avg_line_length": 50.4074074074, "max_line_length": 147, "alphanum_fraction": 0.6252755327, "include": true, "reason": "import numpy", "num_tokens": 495}
|
import numpy as np
from collections import defaultdict
# Fix the global NumPy RNG so epsilon-greedy action sampling is reproducible.
np.random.seed(7)
class Agent:
    def __init__(self, nA=6, alpha=0.5, gamma=0.85, start_epsilon=1):
        """ Initialize agent.

        Params
        ======
        - nA (int): number of actions available to the agent
        - alpha (float): step-size parameter for the update step
        - gamma (float): discount rate (always between 0 and 1, inclusive)
        - start_epsilon (float): initial random-action probability; it is
          decayed as start_epsilon / num_episode
        """
        self.nA = nA
        # initialize action-value function (empty dictionary of arrays)
        self.Q = defaultdict(lambda: np.zeros(self.nA))
        self.start_epsilon = start_epsilon
        self.alpha = alpha
        self.gamma = gamma
        self.num_episode = 1

    def get_probs(self, state):
        """ Obtains the action probabilities corresponding to epsilon-greedy policy.

        Note: despite the parameter name, ``state`` receives the array of
        action values Q[s] (see select_action), whose argmax is the greedy
        action. The name is kept for backward compatibility.
        """
        # decrease epsilon with each episode
        epsilon = self.start_epsilon / self.num_episode
        # all actions share the exploration mass epsilon / nA ...
        policy_s = np.ones(self.nA) * epsilon / self.nA
        # ... and the greedy action gets the remaining probability
        policy_s[np.argmax(state)] = 1 - epsilon + (epsilon / self.nA)
        return policy_s

    def select_action(self, state):
        """ Given the state, select an action.

        Params
        ======
        - state: the current state of the environment

        Returns
        =======
        - action: an integer, compatible with the task's action space
        """
        action = np.random.choice(np.arange(self.nA), p=self.get_probs(self.Q[state]))
        return action

    def step(self, state, action, reward, next_state, done):
        """ Update the agent's knowledge, using the most recently sampled tuple.

        Q-learning (sarsamax) update: Q(s,a) += alpha * (target - Q(s,a)),
        where target = r + gamma * max_a' Q(s',a') for non-terminal
        transitions and target = r for terminal ones.

        Params
        ======
        - state: the previous state of the environment
        - action: the agent's previous choice of action
        - reward: last reward received
        - next_state: the current state of the environment
        - done: whether the episode is complete (True or False)
        """
        # estimate in Q-table for state, action pair
        old_estimate = self.Q[state][action]
        if not done:
            # alternative estimate for next state, action pair
            target = reward + self.gamma * np.max(self.Q[next_state])
        else:
            # bug fix: terminal transitions previously performed no update
            # at all, so the final reward was never learned; a terminal
            # state has no future value, hence target = reward
            target = reward
        # error in estimate
        error = target - old_estimate
        # get updated Q(s,a) value
        self.Q[state][action] = old_estimate + self.alpha * error
        if done:
            self.num_episode += 1
|
{"hexsha": "2936149721c975f0bf53936471c266984e358257", "size": 2566, "ext": "py", "lang": "Python", "max_stars_repo_path": "p_lab-taxi/Sarsamax/agent.py", "max_stars_repo_name": "and-buk/Udacity-DRLND", "max_stars_repo_head_hexsha": "c796137734e8d848da37a2f9be38f2fb1fdc176d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-09-09T10:51:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-07T20:57:25.000Z", "max_issues_repo_path": "p_lab-taxi/Sarsamax/agent.py", "max_issues_repo_name": "and-buk/reinforcement-learning", "max_issues_repo_head_hexsha": "c796137734e8d848da37a2f9be38f2fb1fdc176d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "p_lab-taxi/Sarsamax/agent.py", "max_forks_repo_name": "and-buk/reinforcement-learning", "max_forks_repo_head_hexsha": "c796137734e8d848da37a2f9be38f2fb1fdc176d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6571428571, "max_line_length": 87, "alphanum_fraction": 0.5974279034, "include": true, "reason": "import numpy", "num_tokens": 562}
|
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
CIFAR-10 contains color images of 10 classes.
More info at: http://www.cs.toronto.edu/~kriz/cifar.html
"""
import logging
import numpy as np
import os
import tarfile
from neon.datasets.dataset import Dataset
from neon.util.compat import range
from neon.util.persist import deserialize
logger = logging.getLogger(__name__)
class CIFAR10(Dataset):
    """
    Sets up a CIFAR-10 dataset.

    Attributes:
        url (str): where to find the source data
        backend (neon.backends.Backend): backend used for this data
        inputs (dict): structure housing the loaded train/test/validation
                       input data
        targets (dict): structure housing the loaded train/test/validation
                        target data

    Keyword Args:
        repo_path (str, optional): where to locally host this dataset on disk
    """
    url = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'

    def __init__(self, **kwargs):
        self.macro_batched = False
        # configuration keys (repo_path, sample_pct, ...) become attributes
        self.__dict__.update(kwargs)

    def initialize(self):
        # no further setup required beyond __init__
        pass

    def fetch_dataset(self, save_dir):
        """Download and extract the CIFAR-10 archive into save_dir,
        skipping any step whose output already exists on disk."""
        save_dir = os.path.expandvars(os.path.expanduser(save_dir))
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        repo_gz_file = os.path.join(save_dir, os.path.basename(self.url))
        if not os.path.exists(repo_gz_file):
            self.download_to_repo(self.url, save_dir)
        # presence of the test batch marks a completed extraction
        data_file = os.path.join(save_dir, 'cifar-10-batches-py', 'test_batch')
        if not os.path.exists(data_file):
            logger.info('untarring: %s', repo_gz_file)
            infile = tarfile.open(repo_gz_file)
            infile.extractall(save_dir)
            infile.close()

    def load_file(self, filename, nclasses):
        """Load one pickled CIFAR-10 batch file.

        Returns:
            tuple: (images as float32 scaled to [0, 1],
                    one-hot float32 label matrix of shape (n, nclasses))
        """
        logger.info('loading: %s', filename)
        # renamed from `dict`, which shadowed the builtin
        batch = deserialize(filename)
        full_image = np.float32(batch['data'])
        full_image /= 255.
        labels = np.array(batch['labels'])
        onehot = np.zeros((len(labels), nclasses), dtype='float32')
        for col in range(nclasses):
            onehot[:, col] = (labels == col)
        return (full_image, onehot)

    def load(self, backend=None, experiment=None):
        """Populate self.inputs / self.targets with the train and test
        splits, optionally subsampling and carving out a validation set.

        Raises:
            AttributeError: if ``repo_path`` was not configured.
        """
        if self.inputs['train'] is not None:
            # already loaded
            return
        if 'repo_path' in self.__dict__:
            self.repo_path = os.path.expandvars(os.path.expanduser(
                self.repo_path))
            ncols = 32 * 32 * 3
            ntrain_total = 50000
            nclasses = 10
            save_dir = os.path.join(self.repo_path,
                                    self.__class__.__name__)
            self.fetch_dataset(save_dir)
            self.inputs['train'] = np.zeros((ntrain_total, ncols),
                                            dtype='float32')
            self.targets['train'] = np.zeros((ntrain_total, nclasses),
                                             dtype='float32')
            # the training set ships as five consecutive pickled batches
            for i in range(5):
                filename = os.path.join(save_dir, 'cifar-10-batches-py',
                                        'data_batch_' + str(i + 1))
                data, labels = self.load_file(filename, nclasses)
                nrows = data.shape[0]
                start = i * nrows
                end = (i + 1) * nrows
                self.inputs['train'][start:end] = data
                self.targets['train'][start:end] = labels
            if 'sample_pct' in self.__dict__:
                self.sample_training_data()
            filename = os.path.join(save_dir, 'cifar-10-batches-py',
                                    'test_batch')
            data, labels = self.load_file(filename, nclasses)
            self.inputs['test'] = np.zeros((data.shape[0], ncols),
                                           dtype='float32')
            self.targets['test'] = np.zeros((data.shape[0], nclasses),
                                            dtype='float32')
            self.inputs['test'][:] = data
            self.targets['test'][:] = labels
            if hasattr(self, 'validation_pct'):
                self.split_set(
                    self.validation_pct, from_set='train', to_set='validation')
            self.format()
        else:
            raise AttributeError('repo_path not specified in config')
|
{"hexsha": "c47da064cd6a32c4460f9f84f1423c42f043b843", "size": 5005, "ext": "py", "lang": "Python", "max_stars_repo_path": "neon/datasets/cifar10.py", "max_stars_repo_name": "kashif/neon", "max_stars_repo_head_hexsha": "d4d8ed498ee826b67f5fda1746d2d65c8ce613d2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-07-17T16:54:58.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-17T16:54:58.000Z", "max_issues_repo_path": "neon/datasets/cifar10.py", "max_issues_repo_name": "kashif/neon", "max_issues_repo_head_hexsha": "d4d8ed498ee826b67f5fda1746d2d65c8ce613d2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "neon/datasets/cifar10.py", "max_forks_repo_name": "kashif/neon", "max_forks_repo_head_hexsha": "d4d8ed498ee826b67f5fda1746d2d65c8ce613d2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2016-06-09T13:05:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-18T14:18:15.000Z", "avg_line_length": 37.9166666667, "max_line_length": 79, "alphanum_fraction": 0.5628371628, "include": true, "reason": "import numpy", "num_tokens": 1052}
|
""" Generate molecular conformations from atomic pairwise distance matrices. """
import itertools
import numpy as np
import os
from rdkit import Chem
from rdkit.Chem import AllChem
# noinspection PyPackageRequirements
from tap import Tap
class Args(Tap):
    """
    System arguments, parsed from the command line by typed-argument-parser
    (each annotated attribute becomes a CLI flag).
    """
    data_dir: str  # Path to directory containing all distance matrices
    save_dir: str  # Path to directory containing output files
    smiles: str  # SMILES string
    offset: float = 0.0005  # Offset for bounds matrix
def distmat_to_conf(args: Args) -> None:
    """
    Generate molecular conformations from atomic pairwise distance matrices.

    Each file under ``args.data_dir`` is loaded as a pairwise distance matrix;
    a bounds matrix (upper triangle = dist + offset, lower = dist - offset)
    drives an ETKDG embedding, and every valid conformer is accumulated on a
    single molecule, which is finally written as an RDKit binary blob.

    :param args: System arguments.
    :return: None.
    """
    os.makedirs(args.save_dir)

    # Conformation counter
    counter = 0

    # Create molecule from SMILES
    mol = Chem.MolFromSmiles(args.smiles)
    mol = Chem.AddHs(mol)

    ps = AllChem.ETKDG()

    # Scratch molecule that receives each trial embedding
    tmp = Chem.MolFromSmiles(args.smiles)
    tmp = Chem.AddHs(tmp)

    for root, _, files in os.walk(args.data_dir):
        for f in files:
            # Use the pairwise distance matrix to set the ETKDG bounds matrix.
            # Fix: join against the walked directory (root), not args.data_dir,
            # so matrices in subdirectories are found correctly.
            dist_mat = np.load(os.path.join(root, f))
            num_atoms = dist_mat.shape[0]
            for i, j in itertools.combinations(np.arange(num_atoms), 2):
                dist_mat[i][j] += args.offset
                dist_mat[j][i] -= args.offset
            ps.SetBoundsMat(dist_mat)
            AllChem.EmbedMolecule(tmp, params=ps)
            try:
                # Test that the conformation is valid
                c = tmp.GetConformer()
                # Set the conformer Id and increment the conformation counter
                c.SetId(counter)
                counter += 1
                # Add the conformer to the overall molecule object
                mol.AddConformer(c)
            except ValueError:
                continue

    # Print the conformations to a binary file
    bin_str = mol.ToBinary()
    with open(os.path.join(args.save_dir, "conformations.bin"), "wb") as b:
        b.write(bin_str)
|
{"hexsha": "5a36c523730318f94015acf76991d88b91da2fd4", "size": 2194, "ext": "py", "lang": "Python", "max_stars_repo_path": "conformation/distmat_to_conf.py", "max_stars_repo_name": "ks8/conformation", "max_stars_repo_head_hexsha": "f470849d5b7b90dc5a65bab8a536de1d57c1021a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "conformation/distmat_to_conf.py", "max_issues_repo_name": "ks8/conformation", "max_issues_repo_head_hexsha": "f470849d5b7b90dc5a65bab8a536de1d57c1021a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "conformation/distmat_to_conf.py", "max_forks_repo_name": "ks8/conformation", "max_forks_repo_head_hexsha": "f470849d5b7b90dc5a65bab8a536de1d57c1021a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4722222222, "max_line_length": 81, "alphanum_fraction": 0.6061987238, "include": true, "reason": "import numpy", "num_tokens": 491}
|
# -*- coding: utf-8 -*-
"""
Es 7
QR piu stabile
R è maggiorata dalla radice di n + max di aij
"""
import numpy as np
import numpy.linalg as npl
import scipy.linalg as sci
import funzioni_Sistemi_lineari as fz
import matplotlib.pyplot as plt
def Hankel(n):
    """Build the n-by-n Hankel-type test matrix used in this experiment.

    Entries are powers of two along anti-diagonals: 2**(k+1) for k > 0 and
    2**(1/(1-k)) for k <= 0.
    """
    H = np.zeros((n, n), dtype=float)
    for row in range(n):
        for k in range(row + 1 - n, row + 1):
            col = n - 1 + k - row
            if k > 0:
                H[row, col] = 2.0 ** (k + 1)
            else:
                H[row, col] = 2.0 ** (1 / (2 - k - 1))
    return H
# Compare the relative error of LU with partial pivoting vs. Householder QR
# on Hankel test matrices of growing size, against the condition number.
errRelPiv = []
errRelHou = []
indCond = []
for i in range(4,41,6):
    An = Hankel(i)
    # Exact solution of all ones; build b so that An @ xn = b.
    xn = np.ones((i,1))
    b = np.dot(An,xn)
    indCond.append(npl.cond(An, 2))
    P,L,U,flag = fz.LU_pivot(An)
    if flag != 0:
        print("Sistema lineare non risolvibile con Strategia pivotale")
    else:
        x, flag = fz.LUsolve(L,U,P,b)
        errRelPiv.append(npl.norm(xn-x, 2) / npl.norm(xn,2))
    Q, R = sci.qr(An)
    # Solve y = Q^T b, then the triangular system R x = y.
    y = np.dot(Q.T, b)
    x, flag = fz.Usolve(R, y)
    errRelHou.append(npl.norm(xn-x, 2) / npl.norm(xn,2))
# Plot both error curves against the condition number.
plt.plot(indCond, errRelPiv, "r", indCond, errRelHou, "b")
plt.legend(["Pivot", "QR"])
plt.xlabel("Indice di Condizionamento")
plt.ylabel("Errore relativo")
plt.show()
|
{"hexsha": "1744286c1a2b415c9f0075292d2299477f0cd975", "size": 1270, "ext": "py", "lang": "Python", "max_stars_repo_path": "sistemi_lineari/esercizi/Test7.py", "max_stars_repo_name": "luigi-borriello00/Metodi_SIUMerici", "max_stars_repo_head_hexsha": "cf1407c0ad432a49a96dcd08303213e48723c57a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-06-23T14:47:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-07T08:39:27.000Z", "max_issues_repo_path": "sistemi_lineari/esercizi/Test7.py", "max_issues_repo_name": "luigi-borriello00/Metodi_SIUMerici", "max_issues_repo_head_hexsha": "cf1407c0ad432a49a96dcd08303213e48723c57a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sistemi_lineari/esercizi/Test7.py", "max_forks_repo_name": "luigi-borriello00/Metodi_SIUMerici", "max_forks_repo_head_hexsha": "cf1407c0ad432a49a96dcd08303213e48723c57a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.4583333333, "max_line_length": 72, "alphanum_fraction": 0.5527559055, "include": true, "reason": "import numpy,import scipy", "num_tokens": 437}
|
def mapc2p(xc,yc):
    """
    Map computational coordinates (xc, yc) to physical coordinates (xp, yp).
    Must stay consistent with the Fortran implementation in mapc2p.f.
    """
    from numpy import abs
    shift = (abs(yc + .2) + .8) / 2
    xp = xc + shift
    yp = yc
    return xp, yp
|
{"hexsha": "e6b3fd15d7224f6bd0ada4ea35a459796902d08f", "size": 218, "ext": "py", "lang": "Python", "max_stars_repo_path": "book/chap23/acoustics/mapc2p.py", "max_stars_repo_name": "geoflows/geoclaw-4.x", "max_stars_repo_head_hexsha": "c8879d25405017b38392aa3b1ea422ff3e3604ea", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2016-11-13T03:11:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-07T18:59:48.000Z", "max_issues_repo_path": "book/chap23/acoustics/mapc2p.py", "max_issues_repo_name": "che-wenchao/D-Claw", "max_issues_repo_head_hexsha": "8ab5d971c9a7a7130e03a447a4b8642e292f4e88", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-01-14T18:00:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T14:25:24.000Z", "max_forks_repo_path": "book/chap23/acoustics/mapc2p.py", "max_forks_repo_name": "che-wenchao/D-Claw", "max_forks_repo_head_hexsha": "8ab5d971c9a7a7130e03a447a4b8642e292f4e88", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-01-14T17:15:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-03T17:28:44.000Z", "avg_line_length": 21.8, "max_line_length": 76, "alphanum_fraction": 0.5917431193, "include": true, "reason": "from numpy", "num_tokens": 70}
|
#!/usr/bin/python
import pickle
from rdkit import Chem
import os,re,glob,sys
import numpy as np
import math
from rdkit.Chem import AllChem, rdmolops
from rdkit.Chem.Descriptors import MolWt
from xyz2mol import xyz2mol
import sys
import numpy as np
np.set_printoptions(threshold=sys.maxsize)
import subprocess
import pprint
from rdkit import RDLogger
from pycbh.cg_utils import *
#from pycbh.iep import iep
def vprint(*argu, end='\n'):
    """Print each argument (separated by *end*) plus a trailing newline,
    but only when the module-level flag ``verbose`` equals 1."""
    if verbose != 1:
        return
    for item in argu:
        print(item, end=end)
    print()
def str2list(st):
    """Parse a bracketed, comma-separated string (e.g. '[1, 2, 3]') into a
    list of ints; all brackets and spaces are stripped first."""
    digits_only = st.replace(' ', '').replace('[', '').replace(']', '')
    return list(map(int, digits_only.split(',')))
def molfromxyz(fn):
    """Read an XYZ file and build an RDKit Mol via xyz2mol.

    The molecule index is parsed from qm7b-style names
    ('xyz/qm7b_<N>.xyz' -> N-1); any other name yields index 0.
    Returns (mol_idx, mol).
    """
    atoms, charge, xyz_coordinates = xyz2mol.read_xyz_file(fn)
    try:
        mol_idx = int(fn.replace('xyz/qm7b_','').replace('.xyz',''))-1
    except:
        mol_idx= int(0)
    mol = xyz2mol.xyz2mol(atoms, xyz_coordinates, charge, embed_chiral=True) #False)
    return mol_idx, mol
# Periodic-table symbols; index 0 corresponds to atomic number 1 (hydrogen).
_ELEMENT_SYMBOLS = ['H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi', 'Po', 'At', 'Rn', 'Fr', 'Ra', 'Ac', 'Th', 'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk', 'Cf', 'Es', 'Fm', 'Md', 'No', 'Lr', 'Rf', 'Db', 'Sg', 'Bh', 'Hs', 'Mt', 'Ds', 'Rg', 'Cn', 'Uut', 'Fl', 'Uup', 'Lv', 'Uus', 'Uuo']
# Symbol -> atomic number map, built once at import time.
_ELEMENT_NUMBERS = {sym: i + 1 for i, sym in enumerate(_ELEMENT_SYMBOLS)}
def atom2label(label):
    """Return the atomic number for an element symbol; anything not found
    (already-numeric labels, unknown strings) is returned unchanged.

    Fix: the original rebuilt the 118-element list and linearly scanned it on
    every call; the lookup table is now a module-level dict (O(1) per call).
    """
    return _ELEMENT_NUMBERS.get(label, label)
def xyzfromsmi(smi):
    """Generate 3-D coordinates for a SMILES string via RDKit + OpenBabel.

    Embeds the molecule with RDKit, writes a temporary mol file and shells
    out to ``obabel --gen3d`` to obtain XYZ coordinates; if the mol-file
    route fails, falls back to feeding the raw SMILES to obabel.
    Returns (atoms, xyz_coordinates) with atoms as atomic numbers.
    """
    mol = Chem.MolFromSmiles(smi)
    mol = Chem.AddHs(mol)
    AllChem.EmbedMolecule(mol)
    Chem.Kekulize(mol, clearAromaticFlags=True)
    #print(smi, mol.GetNumAtoms())
    try:
        mol_ = Chem.RemoveHs(mol)
        with open('fragment_lookup/tmp.mol', "w") as FILE:
            FILE.write(Chem.MolToMolBlock(mol_))
        xyz_coordinates=list()
        #print(Chem.MolToMolBlock(mol))
        bashCommand = 'obabel -imol fragment_lookup/tmp.mol -oxyz --gen3d -xb'
        process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output, error = process.communicate()
        # Skip the two XYZ header lines, then split each atom line on spaces.
        output=[x.split(' ') for x in output.decode("utf-8").split("\n")[2::] ]
        for i, x_ in enumerate(output):
            #vprint(x_,len(x_))
            if len(x_) > 3:
                xyz_coordinates.append([float(x) for x in x_[1::] if len(x) > 0])
    except:
        # Fallback: let obabel parse the SMILES directly from a temp file.
        xyz_coordinates=list()
        #print(Chem.MolToMolBlock(mol))
        with open('tmp','w') as FILE:
            FILE.write(smi)
        bashCommand = 'obabel -ismi tmp -oxyz --gen3d -xb '
        process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output, error = process.communicate()
        #print(output)
        output=[x.split(' ') for x in output.decode("utf-8").split("\n")[2::] ]
        for i, x_ in enumerate(output):
            #print(x_,len(x_))
            if len(x_) > 3:
                xyz_coordinates.append([float(x) for x in x_[1::] if len(x) > 0])
    #print(coords)
    atoms = [atom2label(atom.GetSymbol()) for atom in mol.GetAtoms()]
    #print(atoms)
    return atoms, xyz_coordinates
def smifromsmifile(fn):
    """Return the first non-empty line of the SMILES file *fn*.

    Fix: the original left the file handle open (resource leak); it is now
    closed deterministically via a context manager.
    """
    with open(fn, "r") as fh:
        return str(list(filter(None, fh.read().split("\n")))[0])
def molfromsmi(fn):
    """Build an RDKit Mol from *fn*, which may be either a path to a .smi
    file or a raw SMILES string. Returns (0, mol) to mirror molfromxyz."""
    try:
        smiles = smifromsmifile(fn)
    except:  # *fn* was not a readable file; treat it as a SMILES string
        smiles = fn
    return 0, Chem.MolFromSmiles(smiles)
def xyzfromfn(fn, heavy_only=False):
    """Read an XYZ file; optionally drop hydrogens (atomic number 1).

    Returns (atoms, coords) with each coordinate rounded to 8 decimals.
    """
    atoms, charge, raw_coords = xyz2mol.read_xyz_file(fn)
    kept_atoms, kept_xyz = list(), list()
    for i, z in enumerate(atoms):
        if heavy_only and z == 1:
            continue
        kept_atoms.append(z)
        kept_xyz.append([round(float(c), 8) for c in raw_coords[i]])
    return kept_atoms, kept_xyz
def atom2onehot(atomtypes, atom):
    """One-hot encode *atom* against the list *atomtypes*; all zeros when
    the atom is not listed."""
    encoding = []
    for t in atomtypes:
        encoding.append(1 if atom == t else 0)
    return encoding
def gaussian_expansion(d, n_centers=20, sigma=0.5, min_d=0.0, max_d=4.0, centers=[None]):
    """Expand a scalar distance *d* onto a Gaussian radial basis.

    When *centers* is left at its [None] sentinel, n_centers points are
    spread linearly over [min_d, max_d]. Returns (values, centers) with each
    value rounded to 8 decimals.
    """
    if None in centers:
        centers = np.linspace(min_d, max_d, n_centers)
    #print('{} centers between {} and {} :\n  {}'.format(n_centers,min_d,max_d,centers))
    values = [round(math.exp(-((d - c) ** 2) / sigma ** 2), 8) for c in centers]
    return values, centers
def transform_edges(edge, verbose=False, use_GauExp=True, use_bondinverse=False, use_z=True, use_onehotatom=True):
    '''
    Featurize one bond edge.

    input: edge (list) : [ bond_len, atom_num1, atom_num2 ]
    Flags control which features are appended: atomic numbers (use_z), the
    raw bond length, its Gaussian expansion (use_GauExp) or inverse
    (use_bondinverse), and one-hot atom types (use_onehotatom).
    Returns the feature vector as a list of floats.
    '''
    atomtypes = [6, 7, 8, 9, 16, 17]
    feats = list()
    if use_z:
        feats += [edge[1], edge[2]]
    feats.append(edge[0])
    if use_GauExp:
        feats += gaussian_expansion(edge[0])[0]
    elif use_bondinverse:
        feats.append(1 / edge[0])
    if use_onehotatom:
        feats += atom2onehot(atomtypes, edge[1])
        feats += atom2onehot(atomtypes, edge[2])
    return [float(v) for v in feats]
def get_dist(x1, x2):
    """Euclidean distance between two 3-D points, rounded to 8 decimals."""
    dx = x1[0] - x2[0]
    dy = x1[1] - x2[1]
    dz = x1[2] - x2[2]
    return round((dx * dx + dy * dy + dz * dz) ** 0.5, 8)
def pprint_graph(graph):
    """Pretty-print a graph dict (nodes/edges/senders/receivers/globals),
    one node/edge per line, followed by the numpy shape of each field."""
    print("graph = {")
    print(" 'nodes':",end=' ')
    for idx, a in enumerate(graph['nodes']):
        if idx==0:
            print(a)
        else:
            print(' {}'.format(a))
    print(" 'edges':",end=' ')
    for idx, e in enumerate(graph['edges']):
        if idx==0:
            print(e)
        else:
            print(' {}'.format(e))
    print(" 'senders': {}".format(graph['senders']))
    print(" 'receivers': {}".format(graph['receivers']))
    print(" 'globals': {}".format(graph['globals']))
    print("}")
    # Shape summary (via numpy) for quick sanity checking.
    print('graph[nodes] : {}'.format(np.asarray(graph['nodes']).shape))
    print('graph[edges] : {}'.format(np.asarray(graph['edges']).shape))
    print('graph[senders] : {}'.format(np.asarray(graph['senders']).shape))
    print('graph[receivers] : {}'.format(np.asarray(graph['receivers']).shape))
    print('graph[globals] : {}'.format(np.asarray(graph['globals']).shape))
    return
def molneighbors_old(idx, mol):
    """(Legacy) Return atom indices bonded to *idx*, including *idx* itself,
    in the order the bonds are visited; empty list if no bond touches it."""
    neighborhood = list()
    for b in mol.GetBonds():
        endpoints = (b.GetBeginAtomIdx(), b.GetEndAtomIdx())
        if idx in endpoints:
            for e in endpoints:
                if e not in neighborhood:
                    neighborhood.append(e)
    return neighborhood
def coarse_grain_old(idx,atoms,mol,cg_opts):
    """(Legacy) Group atom *idx* with related atoms for coarse-graining.

    Supported cg_opts: 'rings' (merge idx with every SSSR ring containing
    it) and 'halogen'/'halogens' (pull halogens and their bonded neighbours
    into each group). Returns a list of index groups, e.g. [[idx, ...]].
    NOTE(review): mutates the groups in place while scanning them; relies on
    molneighbors() from pycbh.cg_utils.
    """
    if len(cg_opts) > 0:
        print('Coarse graining')
        incl=list()
    else:
        incl=[[idx]]
    if 'rings' in cg_opts:
        print('  rings')
        if mol.GetAtomWithIdx(idx).IsInRing():
            ssr=Chem.GetSymmSSSR(mol)
            for r in ssr:
                if idx in r:
                    incl.append([idx]+[x for x in r])
        if len(incl) < 1:
            incl=[[idx]]
        #incl = [x for i, x in enumerate(incl) if incl[i::].count(x) < 2]
    if 'halogen' in cg_opts or 'halogens' in cg_opts:
        print('  halogens')
        halogens=[9, 17, 35, 53, 85, 117]
        #if int(atoms[idx]) in halogens:
        for idx_, inc in enumerate(incl):
            for i in inc:
                #incl=molneighbors(idx,mol)
                if int(atoms[i]) in halogens:
                    # Halogen member: absorb all of its neighbours.
                    for x in molneighbors(i,mol):
                        if x not in inc:
                            inc.append(x)
                            #incl[jdx].append(x)
                    #incl.extend(molneighbors(i,mol))
                else:
                    # Non-halogen member: absorb any halogen neighbours.
                    incl_=molneighbors(i,mol)
                    #incl_=molneighbors(idx,mol)
                    for jdx in incl_:
                        if int(atoms[jdx]) in halogens:
                            if jdx not in incl:
                                #incl.append([idx]+jdx)
                                inc.append(jdx)
            incl[idx_]=inc
    if len(cg_opts) > 0:
        print('  incl:{}'.format(incl))
    return incl
def float_len(x):
    """Length of *x* as a float."""
    count = len(x)
    return float(count)
def smi2graph(smi, simple_graph=False):
    """Construct molecular graphs from a SMILES string or a .smi file path.

    Returns the (graph, cg_graph) pair produced by mol2graph.
    """
    try:
        smi = smifromsmifile(smi)
    except:  # *smi* was already a SMILES string, not a file path
        pass
    rdmol = Chem.AddHs(Chem.MolFromSmiles(smi))
    graph, cg_graph = mol2graph(smi, 0, rdmol, simple_graph=simple_graph)
    return graph, cg_graph
def molfn2graph(mol, simple_graph=False):
    """Construct molecular graphs from a .mol file path (or an RDKit Mol,
    if reading the file fails). Returns the (graph, cg_graph) pair."""
    try:
        mol = Chem.MolFromMolFile(mol)
    except:  # already an RDKit Mol object
        pass
    rdmol = Chem.AddHs(mol)
    graph, cg_graph = mol2graph('', 0, rdmol, simple_graph=simple_graph)
    return graph, cg_graph
def fn2graph(fn, simple_graph=False, fully_connected=False):
    """Construct molecular graphs from an .xyz file path.

    Returns the (graph, cg_graph) pair produced by mol2graph.
    """
    mol_idx, rdmol = molfromxyz(fn)
    graph, cg_graph = mol2graph(fn, mol_idx, rdmol,
                                simple_graph=simple_graph,
                                fully_connected=fully_connected)
    return graph, cg_graph
def mol2graph(fn, mol_idx, mol, simple_graph=False, cg_opts=['halogen','rings'], fully_connected=False):
    """Build graph representations of an RDKit molecule.

    Produces two dicts with keys nodes/edges/senders/receivers/globals:
    the full atom-level graph and a coarse-grained one where grouped atoms
    (per coarse_grain from pycbh.cg_utils) share a node.

    NOTE(review): despite the local names, the return order is
    (cg_graph, graph) -- see the final return statement.
    NOTE(review): the cg_opts parameter is overwritten with ['aromatic']
    inside the node loop, so the default argument is effectively unused.
    Coordinates come from the .xyz file when *fn* is one, otherwise from an
    obabel subprocess; they are only stored when use_coords is enabled below.
    """
    smi = Chem.MolToSmiles(mol,kekuleSmiles=True,canonical=True)
    mol_ = Chem.RemoveHs(mol)
    smi = Chem.MolToSmiles(mol_)
    if '.xyz' in fn:
        atoms, charge, xyz_coordinates = xyz2mol.read_xyz_file(fn)
    else:
        with open('fragment_lookup/tmp.mol', "w") as FILE:
            FILE.write(Chem.MolToMolBlock(mol_))
        xyz_coordinates=list()
        #print(Chem.MolToMolBlock(mol))
        bashCommand = 'obabel -imol fragment_lookup/tmp.mol -oxyz --gen3d -xb'
        process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output, error = process.communicate()
        output=[x.split(' ') for x in output.decode("utf-8").split("\n")[2::] ]
        for i, x_ in enumerate(output):
            #vprint(x_,len(x_))
            if len(x_) > 3:
                xyz_coordinates.append([float(x) for x in x_[1::] if len(x) > 0])
    #vprint(xyz_coordinates)
    atoms = [atom2label(atom.GetSymbol()) for atom in mol.GetAtoms()]
    #print(len(atoms),atoms)
    #print(len(xyz_coordinates),xyz_coordinates)
    #sys.exit()
    '''
    try:
        atoms, xyz_coordinates = xyzfromfn(fn)
    except:
        try:
            atoms, xyz_coordinates = xyzfromsmi(fn)
        except:
            try:
                atoms, xyz_coordinates = xyzfromsmi(smi)
            except:
                sys.exit('failed')
    '''
    #atomtypes = [6, 7, 8, 9, 16, 17]
    graph={'nodes':list(),
           'edges':list(),
           'senders':list(),
           'receivers':list(),
           'globals':list()
           }
    cg_graph={'nodes':list(),
              'edges':list(),
              'senders':list(),
              'receivers':list(),
              'globals':list()
              }
    #for atom in mol.GetAtoms():
    #    print(atom.GetExplicitValence())
    #print('here')
    include_H = True
    cg_incld = list()
    #mol = Chem.MolFromMolFile('fragment_lookup/tmp.mol')
    use_coords=False
    # Oxygens are handled in a second pass (see 'atom = 8.0' loop below).
    oxy_ls=[idx for idx, x in enumerate(atoms) if x==8]
    #print('oxy list: {}'.format(oxy_ls))
    for idx, atom in enumerate(atoms):
        if idx in cg_incld+oxy_ls:
            #print('{} already incl'.format(idx))
            pass
        elif atom != 1.0 or include_H:
            cg_opts=[]
            #cg_opts=['nitro','sulfo','phospho','rings']
            cg_opts=['aromatic']
            cg_incl_ls=coarse_grain(idx,atoms,mol,cg_opts=cg_opts)
            #print('idx: {}'.format(idx))
            atom_obj = mol.GetAtoms()[idx]
            #print('  ',idx, float(atom), atom_obj.GetExplicitValence(), xyz_coordinates[idx])
            for cg_incl in cg_incl_ls:
                if use_coords:
                    try:
                        graph['nodes'].append([idx, float(atom), cg_incl, atom_obj.GetExplicitValence(), [round(x,4) for x in xyz_coordinates[idx].tolist()]])
                    except:
                        #print('{} failed'.format(idx))
                        graph['nodes'].append([idx, float(atom), cg_incl, atom_obj.GetExplicitValence(), [round(x,4) for x in xyz_coordinates[idx]]])
                else:
                    graph['nodes'].append([idx, float(atom), cg_incl])
                if idx not in cg_incld:
                    cg_graph['nodes'].append([idx, float(atom), cg_incl])
                    #print('added {}'.format([idx, float(atom), cg_incl]))
            if idx not in cg_incld:
                for cg_incl in cg_incl_ls:
                    cg_incld.extend(cg_incl)
    #print(cg_incld)
    # Second pass: oxygens not already swallowed by a coarse-grain group.
    atom = 8.0
    for idx in oxy_ls:
        if idx not in cg_incld:
            cg_incl_ls=coarse_grain(idx,atoms,mol,cg_opts=cg_opts)
            atom_obj = mol.GetAtoms()[idx]
            for cg_incl in cg_incl_ls:
                if use_coords:
                    try:
                        graph['nodes'].append([idx, float(atom), cg_incl, atom_obj.GetExplicitValence(), [round(x,4) for x in xyz_coordinates[idx].tolist()]])
                    except:
                        graph['nodes'].append([idx, float(atom), cg_incl, atom_obj.GetExplicitValence(), [round(x,4) for x in xyz_coordinates[idx]]])
                else:
                    graph['nodes'].append([idx, float(atom), cg_incl])
                if idx not in cg_incld:
                    cg_graph['nodes'].append([idx, float(atom), cg_incl])
            if idx not in cg_incld:
                for cg_incl in cg_incl_ls:
                    cg_incld.extend(cg_incl)
    graph['nodes']=sorted(graph['nodes'])
    #pprint_graph(graph)
    #pprint.pprint(invar)
    #print('before kek')
    Chem.Kekulize(mol, clearAromaticFlags=True)
    #print('here')
    # Edge pass: every bond becomes a pair of directed edges in both graphs.
    for bond in mol.GetBonds():
        idx1=bond.GetBeginAtomIdx()
        idx2=bond.GetEndAtomIdx()
        inc=True
        if atoms[idx1] == 1.0 or atoms[idx2] == 1.0:
            if include_H:
                inc=True
            else:
                inc=False
        if inc: #atoms[idx1] != 1.0 and atoms[idx2] != 1.0:
            if use_coords:
                dist = get_dist(xyz_coordinates[idx1],xyz_coordinates[idx2])
                coulomb_inter = round(float(atoms[idx1])*float(atoms[idx2])/float(dist),8)
                edge1 = [atoms[idx1], atoms[idx2], bond.GetBondTypeAsDouble(), dist, coulomb_inter]
                edge2 = [atoms[idx2], atoms[idx1], bond.GetBondTypeAsDouble(), dist, coulomb_inter]
            else:
                edge1 = [atoms[idx1], atoms[idx2], bond.GetBondTypeAsDouble()]
                edge2 = [atoms[idx2], atoms[idx1], bond.GetBondTypeAsDouble()]
            graph['edges'].append(edge1)
            graph['senders'].append(idx1)
            graph['receivers'].append(idx2)
            graph['edges'].append(edge2)
            graph['senders'].append(idx2)
            graph['receivers'].append(idx1)
            # Mirror the bond onto the coarse-grained graph, but only once
            # per pair of cg nodes (node[2] holds each group's atom indices).
            for idx, node in enumerate(cg_graph['nodes']):
                if idx1 in node[2]:
                    if idx2 not in node[2]:
                        for jdx, jnode in enumerate(cg_graph['nodes']):
                            create_edge=True
                            if idx2 in jnode[2]:
                                for kdx, sender in enumerate(cg_graph['senders']):
                                    if idx==sender:
                                        if jdx==cg_graph['receivers'][kdx]:
                                            create_edge=False
                                            break
                                if create_edge:
                                    cg_graph['edges'].append(edge1)
                                    cg_graph['senders'].append(idx)
                                    cg_graph['receivers'].append(jdx)
                                    cg_graph['edges'].append(edge2)
                                    cg_graph['senders'].append(jdx)
                                    cg_graph['receivers'].append(idx)
    if fully_connected:
        # Add zero-order edges between every unconnected cg node pair.
        for idx1 in range(len(cg_graph['nodes'])-1):
            for idx2 in range(idx1+1,len(cg_graph['nodes'])):
                if not is_connected(idx1,idx2,cg_graph):
                    dist = get_dist(xyz_coordinates[idx1],xyz_coordinates[idx2])
                    coulomb_inter = round(float(atoms[idx1])*float(atoms[idx2])/float(dist),8)
                    edge1 = [atoms[idx1], atoms[idx2], 0.0, dist, coulomb_inter]
                    edge2 = [atoms[idx2], atoms[idx1], 0.0, dist, coulomb_inter]
                    cg_graph['edges'].append(edge1)
                    cg_graph['senders'].append(idx1)
                    cg_graph['receivers'].append(idx2)
                    cg_graph['edges'].append(edge2)
                    cg_graph['senders'].append(idx2)
                    cg_graph['receivers'].append(idx1)
    graph['globals']=[fn, smi]
    cg_graph['globals']=[fn, smi]
    #pprint_graph(graph)
    #pprint_graph(cg_graph)
    #sys.exit()
    return cg_graph, graph #, cg_graph
def is_connected(idx1, idx2, graph):
    """True iff the graph has a directed edge idx1 -> idx2 (parallel
    senders/receivers lists)."""
    for pos, src in enumerate(graph['senders']):
        if src == idx1 and graph['receivers'][pos] == idx2:
            return True
    return False
def check_dup(f, f_ls):
    """Count how many entries of *f_ls* equal *f*, ignoring element order."""
    #return f_ls.count(f)
    target = sorted(f)
    return sum(1 for candidate in f_ls if sorted(candidate) == target)
def overlap(fragments, rung, graph):
    '''
    Find pairwise overlaps between CBH fragments.

    rung % 2 == 0 -> atom-centric (fragments overlap when their seed atoms
    are bonded); rung % 2 == 1 -> bond-centric (fragments overlap when they
    share exactly one of their first two atoms). Each non-empty intersection
    (taken once per unordered pair) is appended to the result.
    '''
    bond_centric = rung % 2
    overlaps = list()
    for i, frag_a in enumerate(fragments):
        for j, frag_b in enumerate(fragments):
            do_overlap = False
            if bond_centric:
                shared = [v for v in frag_a[0:2] if v in frag_b[0:2]]
                if len(shared) == 1:
                    do_overlap = True
            elif is_connected(frag_a[0], frag_b[0], graph):
                do_overlap = True
            if do_overlap and i < j:
                common = [v for v in frag_a if v in frag_b]
                if len(common) > 0:
                    overlaps.append(common)
    if rung == 0:
        # NOTE(review): this sum is computed but never used; kept verbatim
        # from the original to preserve behavior.
        bond_deg = 0.0
        for bond in graph['edges']:
            if bond[0] != 1 and bond[1] != 1:
                bond_deg += bond[2]
    return overlaps
def collect_neighbors(idx, rung, graph):
    '''
    Grow the CBH fragment(s) seeded at node *idx*.

    atom_or_bond == 0 for atom centric
    atom_or_bond == 1 for bond centric

    Bond-centric rungs seed one fragment per real bond (edge weight > 0)
    from *idx* to a higher-indexed heavy neighbour; atom-centric rungs seed
    a single [idx] fragment. The while loop then expands each fragment by
    one bonded shell per two rungs, skipping hydrogens (node[1] == 1.0).
    Returns a list of fragments (lists of node indices).
    '''
    atom_or_bond = rung % 2
    incl_ls=list()
    if atom_or_bond:
        l = 1
        found=False
        for i, sender in enumerate(graph['senders']):
            if sender == idx:
                idx2 = graph['receivers'][i]
                connected_node = graph['nodes'][idx2]
                if connected_node[1] != 1.0:
                    # idx2 > idx avoids emitting each bond twice.
                    if idx2 > idx and graph['edges'][i][2] > 0.0:
                        #incl=[idx,idx2]+graph['nodes'][idx][2]+graph['nodes'][idx2][2]
                        #incl_ls.append(incl)
                        incl_ls.append([idx,idx2])
                        found=True
        if not found:
            incl_ls.append([idx])
    else:
        l = 0
        #incl=[idx]+graph['nodes'][idx][2]
        #incl_ls.append(incl)
        incl_ls.append([idx])
    # Each pass adds one shell of heavy, bonded neighbours; rung advances by 2
    # because a shell corresponds to two CBH rungs.
    while l < rung:
        for j, incl in enumerate(incl_ls):
            new_incl=list()
            for i, sender in enumerate(graph['senders']):
                if sender in incl:
                    idx2 = graph['receivers'][i]
                    connected_node = graph['nodes'][idx2]
                    if connected_node[1] != 1.0:
                        if idx2 not in incl and graph['edges'][i][2] > 0.0:
                            new_incl.append(idx2)
                            #new_incl.extend(graph['nodes'][idx2][2])
            incl_ls[j].extend(new_incl)
        l+=2
    return incl_ls
def fix_cbh(f, o):
    """Prune repeated fragments (*f*) and overlaps (*o*) in place.

    Repeatedly deletes one copy of any entry occurring more than twice
    (order-insensitive comparison via check_dup), one deletion per pass,
    until both lists are stable. NOTE(review): the > 2 threshold keeps up
    to two identical entries -- presumably deliberate CBH bookkeeping;
    confirm against the CBH scheme. Returns (f, o).
    """
    done = False
    while not done:
        done = True
        which = list()
        for idx, x in enumerate(o):
            if check_dup(x, o) > 2:
                which.append(idx)
                done=False
                break
        for i in which:
            del o[i]
        which = list()
        for idx, x in enumerate(f):
            if check_dup(x, f) > 2:
                which.append(idx)
                done=False
                break
        for i in which:
            del f[i]
    return f, o
def calc_cbh(rung, graph):
    """Compute CBH-*rung* fragments and their pairwise overlaps for *graph*.

    Every heavy node (node[1] != 1.0, i.e. not hydrogen) seeds fragments via
    collect_neighbors; overlap() then finds the overlap corrections. For
    rung > 0 the duplicate pruning of fix_cbh is applied.

    Returns:
        tuple: (fragments, overlaps) as lists of node-index lists.
    """
    fragments = list()
    for idx, node in enumerate(graph['nodes']):
        if node[1] != 1.0:  # skip hydrogens
            fragments.extend(collect_neighbors(idx, rung, graph))
    overlaps = overlap(fragments, rung, graph)
    if rung > 0:
        return fix_cbh(fragments, overlaps)
    # Fix: the original recomputed a bond-degree sum here and discarded it
    # (dead code duplicated from overlap()); removed with no behavior change.
    return fragments, overlaps
def mol2formula(mol, incl_H=False):
    """Count atoms per element symbol in *mol*; hydrogens are only counted
    when incl_H is True. Returns a {symbol: count} dict."""
    counts = dict()
    for a in mol.GetAtoms():
        sym = a.GetSymbol()
        if sym == 'H' and not incl_H:
            continue
        counts[sym] = counts.get(sym, 0) + 1
    return counts
def moldict2hill(atom_types):
    """Format an element-count dict in Hill order: C first, H second, then
    the remaining symbols alphabetically; counts of 1 are omitted."""
    parts = list()

    def _emit(sym):
        # Append the symbol, plus its count when greater than one.
        parts.append(sym)
        if atom_types[sym] > 1:
            parts.append('{}'.format(atom_types[sym]))

    if 'C' in atom_types:
        _emit('C')
    if 'H' in atom_types:
        _emit('H')
    for sym in sorted(atom_types):
        if sym not in ('C', 'H'):
            _emit(sym)
    return ''.join(parts)
def combine_dicts(d1, d2):
    """Merge *d2* into *d1* in place, combining values on shared keys with
    ``+=`` (works for counts, lists, strings...). Returns the mutated d1."""
    for key, val in d2.items():
        if key in d1:
            d1[key] += val
        else:
            d1[key] = val
    return d1
def chunks(data, size=5000):
    """Yield consecutive slices of *data*, each of at most *size* items."""
    #yield {k:data[k] for k in islice(it, size)}
    for start in range(0, len(data), size):
        yield data[start:start + size]
def formula2ha_old(formula):
    """(Legacy) Count heavy atoms in a formula string -- known to be buggy.

    Walks one character at a time, so multi-digit counts are mishandled
    (each digit contributes int(digit) - 1) and lowercase letters of
    two-letter symbols (e.g. the 'l' in 'Cl') are each counted as an extra
    heavy atom. Superseded by formula2ha(); kept for reference only.
    """
    ha_count=0
    spl=list(formula)
    for i in range(len(spl)):
        try:
            # Digit: add (count - 1) extra atoms of the preceding element.
            c = int(spl[i])-1
            if spl[i-1] != 'H':
                ha_count+=c
        except:
            # Non-digit character: count it unless it is hydrogen.
            if spl[i] != 'H':
                ha_count+=1
            continue
    return ha_count
def formula2ha(f):
    """Count heavy (non-hydrogen) atoms in a Hill-style formula string.

    Walks the formula character by character, accumulating each element
    symbol and its (possibly multi-digit) count; hydrogens are excluded.

    Fix: the original counted a hydrogen with no explicit digit count as a
    heavy atom in two places (an 'H' followed by another uppercase symbol,
    and a trailing bare 'H'); both paths now check the accumulated symbol.
    """
    heavy = 0
    symbol, digits = '', '0'
    have_digits = True  # True once the current element's count is complete
    for ch in f:
        if ch.isalpha():
            if have_digits:
                # Previous element's digit count is complete; tally it.
                if symbol != 'H':
                    heavy += int(digits)
                symbol, digits = '', ''
                have_digits = False
            elif ch.isupper():
                # New element begins; the previous one had an implicit count
                # of 1 -- tally it unless it was hydrogen (fix).
                if symbol != 'H':
                    heavy += 1
                symbol, digits = '', ''
            symbol += ch
        else:
            try:
                int(ch)
                digits += str(ch)
                have_digits = True
            except:
                pass  # ignore any non-alphanumeric character
    # Flush the final element.
    if not have_digits:
        if symbol != 'H':  # fix: a trailing bare 'H' is not a heavy atom
            heavy += 1
    elif symbol != 'H':
        heavy += int(digits)
    return heavy
def get_frag_fn(formula, smi, smi2, frag_keys):
    '''
    Look up -- or register -- the file name for a fragment.

    frag_keys structure:
        dict() = {formula : [idx, smi, smi2, fn]}
    example:
        'C2H6' : [[0, 'CC', '[H][C]([H])([H])[C]([H])([H])[H]H', 'f2_C2H6_000']]

    Returns (fn, created_new_entry, frag_keys); frag_keys is mutated in
    place when a new entry is registered.
    '''
    entries = frag_keys.get(formula)
    if entries is not None:
        for entry in entries:
            if smi == entry[1]:
                # Known fragment: reuse its file name.
                return entry[-1], False, frag_keys
        new_idx = len(entries)
        fn = 'f{}_{}_{:03d}'.format(formula2ha(formula), formula, new_idx)
        entries.append([new_idx, smi, smi2, fn])
    else:
        fn = 'f{}_{}_{:03d}'.format(formula2ha(formula), formula, 0)
        frag_keys[formula] = [[0, smi, smi2, fn]]
    return fn, True, frag_keys
def fraginc2smi(f, mol, frag_keys, frag_type=None, kekulize=False):
    """Extract the sub-molecule given by atom indices *f* and canonicalize it.

    Removes all atoms outside *f*, caps broken valences with explicit
    hydrogens, repairs aromaticity/kekulization, then registers the fragment
    (per component if the SMILES is disconnected) via get_frag_fn, writing a
    .mol file into fragment_lookup/ for each newly seen fragment.
    Returns (smi, formula_dict, frag_fn, frag_keys).
    NOTE(review): for multi-component SMILES, frag_fn is the LAST
    component's file name -- confirm callers expect that.
    """
    RDLogger.DisableLog('rdApp.*')
    smi = Chem.MolToSmiles(mol)
    #print('{:02d}'.format(f[0]), end=' ')
    if kekulize:
        Chem.Kekulize(mol, clearAromaticFlags=True)
    mw = Chem.RWMol(mol)
    numatoms = mw.GetNumAtoms()
    # Record each atom's valence before deletion so caps can be added later.
    total_deg = [atom.GetTotalValence() for atom in mw.GetAtoms()]
    # Delete from the highest index down so indices stay valid.
    for i in range(numatoms):
        idx = numatoms-1-i
        if idx not in f:
            mw.RemoveAtom(idx)
    numatoms = mw.GetNumAtoms()
    #if len(Chem.GetSymmSSSR(mw)) < 1:
    mw = Chem.RWMol(Chem.AddHs(mw))
    #print(total_deg)
    #print('a : {}'.format([atom.GetAtomicNum() for atom in mol.GetAtoms()]))
    #print('f : {}'.format(f))
    # Cap every heavy atom whose valence dropped with explicit hydrogens.
    for idx, val in enumerate(total_deg):
        if idx in f:
            idx2 = sorted(list(set(f))).index(idx)
            atom = mw.GetAtomWithIdx(idx2)
            if atom.GetAtomicNum() != 1:
                if atom.GetTotalValence() != val:
                    #print('{}({})'.format(idx,atom.GetAtomicNum()))
                    #print('VALENCE DOES NOT MATCH {} -> {}'.format(atom.GetTotalValence(),val))
                    #print('numatoms : {}'.format(mw.GetNumAtoms()))
                    for _ in range(val-atom.GetTotalValence()):
                        idx_h = mw.AddAtom(Chem.Atom(1))
                        #print('added H {}'.format(idx_h))
                        mw.AddBond(idx2,idx_h,Chem.BondType.SINGLE)
                    #print('numatoms : {}'.format(mw.GetNumAtoms()))
    #sys.exit()
    # Collect every atom index that sits in a ring of the fragment.
    idx_rings = list()
    for r in Chem.GetSymmSSSR(mw):
        for x in r:
            if x not in idx_rings:
                idx_rings.append(x)
    #print(idx_rings)
    if True:
        for idx, atom in enumerate(mw.GetAtoms()):
            if idx not in idx_rings:
                atom.SetIsAromatic(False)
    if len(Chem.GetSymmSSSR(mw)) < 1:
        try:
            Chem.Kekulize(mw, clearAromaticFlags=True)
            smi = Chem.MolToSmiles(mw,kekuleSmiles=True,canonical=True)
        except:
            print('Cannot kekulize mw')
            smi = Chem.MolToSmiles(mw)
        #smi = Chem.MolToSmiles(mw,kekuleSmiles=True,canonical=True)
    else:
        smi = Chem.MolToSmiles(mw)
    mol = Chem.MolFromSmiles(smi)
    if mol == None:
        # Common repair patterns when the raw fragment SMILES fails to parse.
        if 'n' in smi:
            smi = smi.replace('n','[nH]')
        elif ':O:' in smi:
            smi = smi.replace(':O:','[O]')
        mol = Chem.MolFromSmiles(smi)
    try:
        Chem.Kekulize(mol, clearAromaticFlags=True)
        smi = Chem.MolToSmiles(mol,kekuleSmiles=True)
    except:
        pass
    #smi = Chem.MolToSmiles(mol,kekuleSmiles=True)
    #print(smi, mol)
    #mw = Chem.AddHs(mw)
    mol = Chem.AddHs(mol)
    AllChem.EmbedMolecule(mol)
    #print(smi, mol.GetNumAtoms())
    formula=moldict2hill(mol2formula(mol, incl_H=True))
    smi2=Chem.MolToSmiles(mol,allHsExplicit=True,allBondsExplicit=False)
    mol.SetProp("_Name","{} {} {}".format(formula,smi,smi2))
    if '.' in smi:
        # Disconnected fragment: register each component separately.
        smi_ls = smi.split('.')
        for s in smi_ls:
            mol_s = Chem.MolFromSmiles(s)
            mol_s = Chem.AddHs(mol_s)
            AllChem.EmbedMolecule(mol_s)
            s2 = Chem.MolToSmiles(mol_s,allHsExplicit=True,allBondsExplicit=False)
            formula=moldict2hill(mol2formula(mol_s, incl_H=True))
            mol.SetProp("_Name","{} {} {}".format(formula,s,s2))
            frag_fn, make_mol, frag_keys=get_frag_fn(formula,s,s2,frag_keys)
            if make_mol:
                with open('fragment_lookup/'+frag_fn+'.mol', "w") as fn:
                    fn.write(Chem.MolToMolBlock(mol_s))
                #print('Written to fragment_lookup/{}.mol'.format(frag_fn))
    else:
        frag_fn, make_mol, frag_keys=get_frag_fn(formula,smi,smi2,frag_keys)
        #print(Chem.MolToMolBlock(mol))
        if make_mol:
            with open('fragment_lookup/'+frag_fn+'.mol', "w") as fn:
                fn.write(Chem.MolToMolBlock(mol))
            #print('Written to fragment_lookup/{}.mol'.format(frag_fn))
    return smi, mol2formula(mol, incl_H=True), frag_fn, frag_keys
def cbh_print(cbh):
    """Pretty-print a list of CBH records: each record's leading id is
    dropped, as is a trailing dict of fragment keys when present; remaining
    strings are printed one per line, indented."""
    print()
    for record in cbh:
        if type(record[-1]) == dict:
            record = record[1:-1]
        else:
            record = record[1:]
        print('\n' + '\n'.join([' ' + line for line in record]))
def print_cbh(mol,cbh_dict,rung):
    """Render a CBH-*rung* reaction string for *mol*.

    cbh_dict maps fragment SMILES to a signed stoichiometric coefficient:
    negative coefficients go on the reactant side, positive on the product
    side, with unit coefficients omitted from the text.
    Returns the assembled 'CBH-n: <smiles> + reactants --> products' string.
    """
    react_ls, prod_ls = list(), list()
    #print('\nCBH-{}:'.format(rung))
    mol = Chem.RWMol(mol)
    mol = Chem.RemoveHs(mol)
    Chem.Kekulize(mol, clearAromaticFlags=True)
    #print(' '+Chem.MolToSmiles(mol,kekuleSmiles=True,canonical=True),end=' ')
    for key in cbh_dict:
        if cbh_dict[key] < 0:
            if cbh_dict[key] == -1:
                react_ls.append(' {} '.format(key))
            else:
                react_ls.append(' {} {} '.format(-1*cbh_dict[key],key))
        elif cbh_dict[key] > 0:
            if cbh_dict[key] == 1:
                prod_ls.append(' {} '.format(key))
            else:
                prod_ls.append(' {} {} '.format(cbh_dict[key],key))
    cbh_str='CBH-{}:\n'.format(rung)+' '+Chem.MolToSmiles(mol,kekuleSmiles=True,canonical=True)+' '
    if len(react_ls) > 0:
        #print('+',end='')
        cbh_str=cbh_str+'+'
    #print('+'.join(react_ls),end=' --> ')
    #print('+'.join(prod_ls))
    #print(cbh_dict)
    return cbh_str+'+'.join(react_ls)+' --> '+'+'.join(prod_ls)
def add_attr2graph(graph,rung,which_idx,smi,frag_fn):
  """Attach a CBH fragment annotation ['CBH-<rung>', smi, frag_fn] to the graph.

  With a single index, annotate that node (even rungs only, which are
  atom-centric).  With two indices, annotate every edge connecting the two
  nodes in either direction.  Returns the (mutated) graph.
  """
  if len(which_idx) == 1:
    # Node-centric annotation; odd rungs are bond-centric and skip nodes.
    if rung % 2 == 0:
      graph['nodes'][which_idx[0]].append(['CBH-{}'.format(rung),smi,frag_fn])
  else:
    a, b = which_idx[0], which_idx[1]
    for edge_idx, sender in enumerate(graph['senders']):
      receiver = graph['receivers'][edge_idx]
      # Match the (a, b) pair regardless of edge direction.
      if (sender == a and receiver == b) or (sender == b and receiver == a):
        graph['edges'][edge_idx].append(['CBH-{}'.format(rung),smi,frag_fn])
  return graph
if __name__=='__main__':
  # CLI: python3 fp_gen.py CBH_rung FILENAME(s)
  # CBH_rung is either an integer rung or 'all' (rungs 0-4).
  if len(sys.argv[1::]) < 2:
    sys.exit('Usage python3 fp_gen.py CBH_rung FILENAME(s)')
  fns=sys.argv[2::]
  try:
    rung_ls=[int(sys.argv[1])]
  except ValueError:
    # Non-integer rung argument: only 'all' is accepted.
    if sys.argv[1]=='all':
      rung_ls=[0,1,2,3,4]
    else:
      sys.exit('Usage: python3 fp_gen.py CBH_rung FILENAME(s)\n CBH rung must be an integer')
  # Load (or create) the fragment lookup key file; each non-empty line is
  # "<formula> <field> <field> ..." and a formula may repeat.
  key_fn='fragment_lookup/key.txt'
  with open(key_fn, "a"): pass  # ensure the file exists before reading
  frag_keys=dict()
  for y in [x.split(" ") for x in list(filter(None,open(key_fn, "r").read().split("\n")))]:
    if y[0] in frag_keys:
      frag_keys[y[0]].append(y[1::])
    else:
      frag_keys[y[0]]=[y[1::]]
  verbose=False
  graph=True        # graph-based CBH path; False falls back to fingerprints
  save_graph=False  # when True, write chunked graph arrays to disk
  if graph:
    graphs_ls = list()
    cbh_store=list()  # per file/rung: [filename, cbh string, (optional warnings...)]
    for fn in fns:
      print('{} :'.format(fn),end=' ')
      try:
        # Build the molecular graph and RDKit mol from .smi or .xyz input.
        # NOTE: `graph` is rebound here from the boolean flag above.
        if '.smi' in fn:
          graph = smi2graph(smifromsmifile(fn))
        else:
          graph = fn2graph(fn)
        if '.smi' in fn:
          mol_idx, mol = molfromsmi(fn)
        else:
          mol_idx, mol = molfromxyz(fn)
        Chem.Kekulize(mol, clearAromaticFlags=True)
        print('{}'.format(Chem.MolToSmiles(Chem.RemoveHs(mol),kekuleSmiles=True,canonical=True)))
        for rung in rung_ls:
          # Primary fragments and overlaps for this rung; iep() (presumably
          # inclusion-exclusion - see its definition) regroups them by depth:
          # odd depths extend the product side, even depths the overlap side.
          cbh_p_, cbh_r = calc_cbh(rung, graph)
          f_dict = iep(cbh_p_)
          print(f_dict)
          cbh_p, cbh_r=list(), list()
          for key in f_dict:
            if key%2:
              cbh_p.extend(f_dict[key])
            else:
              cbh_r.extend(f_dict[key])
          print('primary = {}'.format(cbh_p))
          print('overlap = {}'.format(cbh_r))
          cbh_dict = dict()  # fragment SMILES -> signed stoichiometric coefficient
          r_atoms = mol2formula(Chem.AddHs(mol),incl_H=True)  # reactant-side atom counts
          p_atoms = dict()                                    # product-side atom counts
          for f in cbh_p:
            smi, atoms, frag_fn, frag_keys = fraginc2smi(f, mol, frag_keys, frag_type='primary')
            p_atoms = combine_dicts(p_atoms, atoms)
            # Annotate the graph only for top-level primary fragments:
            # even rungs are atom-centric, odd rungs bond-centric.
            if f in cbh_p_:
              atom_or_bond = rung % 2
              which_idx = [f[0]]
              if atom_or_bond:
                which_idx.append(f[1])
              print(' info:',rung,which_idx,smi,frag_fn)
              graph = add_attr2graph(graph,rung,which_idx,smi,frag_fn)
            # Disconnected fragments count once per component.
            if '.' in smi:
              smi_ls=smi.split('.')
            else:
              smi_ls=[smi]
            for smi in smi_ls:
              if smi in cbh_dict:
                cbh_dict[smi]+=1
              else:
                cbh_dict[smi]=1
          for f in cbh_r:
            smi, atoms, frag_fn, frag_keys = fraginc2smi(f, mol, frag_keys, frag_type='overlap')
            r_atoms = combine_dicts(r_atoms, atoms)
            if [sorted(x) for x in cbh_r].count(sorted(f)) > 1:
              print('\n{} matches something else'.format(f))
            if '.' in smi:
              smi_ls=smi.split('.')
            else:
              smi_ls=[smi]
            for smi in smi_ls:
              if smi in cbh_dict:
                cbh_dict[smi]-=1
              else:
                cbh_dict[smi]=-1
          # Rung 0: balance hydrogen by adding H2 on the reactant side.
          if rung == 0:
            if not p_atoms['H'] == r_atoms['H']:
              net_H=abs(p_atoms['H']-r_atoms['H'])
              cbh_dict['[H][H]'] = -1*int(0.5*net_H)
              r_atoms['H']+=net_H
          cbh = print_cbh(mol,cbh_dict,rung)
          # Per-element product-minus-reactant difference (all zeros when
          # balanced).  Computed up front: the previous version referenced
          # net_atoms in the balanced branch before it was ever assigned,
          # raising a NameError that the broad except below silently swallowed.
          net_atoms=dict()
          for key in p_atoms:
            if key in r_atoms:
              net_atoms[key] = p_atoms[key]-r_atoms[key]
          if not sorted(p_atoms.items()) == sorted(r_atoms.items()):
            print(" WARNING: Atom counts dont match")
            print(' Atom counts: {}'.format(net_atoms))
            cbh_store.append([fn,cbh," WARNING: Atom counts dont match", " Atom counts: {}".format(net_atoms)])
          else:
            cbh_store.append([fn,cbh])
            print("\n (Atom counts are balanced)")
            print(' Atom counts: {}'.format(net_atoms))
        graphs_ls.append(graph)
        pprint_graph(graph)
      except Exception:
        # Best-effort: keep processing the remaining files on any per-file failure.
        print(' FAILED pycbh/utils.py : 976')
        pass
    print('\nLoaded {} graphs'.format(len(graphs_ls)))
    for c in cbh_store:
      print('\n'+'\n'.join(c))
    # Rewrite the key file with the (possibly extended) fragment keys.
    with open(key_fn, 'w') as new_key_fn:
      i=0
      for key in sorted(frag_keys):
        for frag in frag_keys[key]:
          new_key_fn.write('{} {}\n'.format(key," ".join([str(x) for x in frag])))
          i+=1
      print('\n{} updated to {} fragments'.format(key_fn,i))
    if save_graph:
      # Split the graphs into ~25 chunks and save each as a .npy file.
      graph_chunks=list()
      for item in chunks(graphs_ls, size=len(graphs_ls)//25):
        graph_chunks.append(item)
      print(' {} chunks made'.format(len(graph_chunks)))
      which_fp=picklefn.replace('.pickle','')
      if not os.path.exists("qm7b_graphs/"+which_fp):
        os.makedirs("qm7b_graphs/"+which_fp)
        print('Made dir: {}'.format("qm7b_graphs/"+which_fp))
      fn_base="qm7b_graphs/"+which_fp+"/qm7b_graphs_"+which_fp
      print('\nWriting to files {}_###.npy'.format(fn_base))
      for idx, item in enumerate(graph_chunks):
        fn=fn_base+'_{:03d}'.format(idx)+'.npy'
        np.save(fn, item)
        print(' written to {}'.format(fn))
  else:
    # Fingerprint-only path.
    for fn in fns:
      fp = fn2fp(fn,V)
      print('{} : {}'.format(fn,' '.join([str(x) for x in fp.tolist()])))
|
{"hexsha": "9f8de001a096c3d79267f2c8ba407494d538a305", "size": 33549, "ext": "py", "lang": "Python", "max_stars_repo_path": "pycbh/utils.py", "max_stars_repo_name": "colliner/pyCBH", "max_stars_repo_head_hexsha": "29fb86d81cf45a343cb8e7e071d183bf74bb1c21", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pycbh/utils.py", "max_issues_repo_name": "colliner/pyCBH", "max_issues_repo_head_hexsha": "29fb86d81cf45a343cb8e7e071d183bf74bb1c21", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pycbh/utils.py", "max_forks_repo_name": "colliner/pyCBH", "max_forks_repo_head_hexsha": "29fb86d81cf45a343cb8e7e071d183bf74bb1c21", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6499498495, "max_line_length": 708, "alphanum_fraction": 0.5857104534, "include": true, "reason": "import numpy", "num_tokens": 10244}
|
# Routines for an unstructured mesh that is contained in a hierarchical, rectangularly
# partitioned mesh data structure
# A node either stores a mesh (leaf) or has children (interior node); `mesh`
# and `parent` are Refs so they may be left unassigned (e.g. the root has no parent).
mutable struct HierarchicalRectangularlyPartitionedMesh
    name::String                                        # identifier (e.g. a grid face-set name)
    rect::Rectangle_2D                                  # axis-aligned bounding box of this node
    mesh::Ref{UnstructuredMesh_2D}                      # the stored mesh; assigned on leaf nodes
    parent::Ref{HierarchicalRectangularlyPartitionedMesh}       # unassigned for the root
    children::Vector{Ref{HierarchicalRectangularlyPartitionedMesh}}  # empty for leaves
end
# Keyword constructor. When `parent` is assigned, the newly built mesh
# registers itself in the parent's `children` vector.
function HierarchicalRectangularlyPartitionedMesh(;
    name::String = "DefaultName",
    rect::Rectangle_2D = Rectangle_2D(),
    mesh::Ref{UnstructuredMesh_2D} = Ref{UnstructuredMesh_2D}(),
    parent::Ref{HierarchicalRectangularlyPartitionedMesh}
        = Ref{HierarchicalRectangularlyPartitionedMesh}(),
    children::Vector{Ref{HierarchicalRectangularlyPartitionedMesh}}
        = Ref{HierarchicalRectangularlyPartitionedMesh}[]
    )
    hrpm = HierarchicalRectangularlyPartitionedMesh(name, rect, mesh, parent, children)
    # Link the new node into its parent's children, if a parent was supplied.
    isassigned(parent) && push!(parent[].children, Ref(hrpm))
    return hrpm
end
# Treat an HRPM as a scalar in broadcast expressions (wrap it in a Ref).
Base.broadcastable(HRPM::HierarchicalRectangularlyPartitionedMesh) = Ref(HRPM)
# Apply `add_boundary_edges` to every mesh stored in the HRPM.
# Type-stable
add_boundary_edges!(HRPM::HierarchicalRectangularlyPartitionedMesh) =
    apply_function_recursively_to_HRPM_meshes!(add_boundary_edges, HRPM)
# Apply `add_connectivity` to every mesh stored in the HRPM.
# Type-stable
add_connectivity!(HRPM::HierarchicalRectangularlyPartitionedMesh) =
    apply_function_recursively_to_HRPM_meshes!(add_connectivity, HRPM)
# Apply `add_edges` to every mesh stored in the HRPM.
# Type-stable
add_edges!(HRPM::HierarchicalRectangularlyPartitionedMesh) =
    apply_function_recursively_to_HRPM_meshes!(add_edges, HRPM)
# Apply `add_everything` to every mesh stored in the HRPM.
# Type-stable
add_everything!(HRPM::HierarchicalRectangularlyPartitionedMesh) =
    apply_function_recursively_to_HRPM_meshes!(add_everything, HRPM)
# Apply `add_materialized_edges` to every mesh stored in the HRPM.
# Type-stable
add_materialized_edges!(HRPM::HierarchicalRectangularlyPartitionedMesh) =
    apply_function_recursively_to_HRPM_meshes!(add_materialized_edges, HRPM)
# Apply `add_materialized_faces` to every mesh stored in the HRPM.
# Type-stable
add_materialized_faces!(HRPM::HierarchicalRectangularlyPartitionedMesh) =
    apply_function_recursively_to_HRPM_meshes!(add_materialized_faces, HRPM)
# Apply `f` to each mesh in the HRPM, replacing the stored mesh with `f(mesh)`.
# Recurses into the children only when no mesh is stored at this node.
function apply_function_recursively_to_HRPM_meshes!(f::Function,
        HRPM::HierarchicalRectangularlyPartitionedMesh)
    if isassigned(HRPM.mesh)
        HRPM.mesh[] = f(HRPM.mesh[])
        return nothing
    end
    for child in HRPM.children
        apply_function_recursively_to_HRPM_meshes!(f, child[])
    end
    return nothing
end
# Call the mutating function `f` on each mesh in the HRPM (the mesh reference
# itself is not reassigned).  Recurses into the children only when no mesh is
# stored at this node.
function apply_mutating_function_recursively_to_HRPM_meshes!(f::Function,
        HRPM::HierarchicalRectangularlyPartitionedMesh)
    if isassigned(HRPM.mesh)
        f(HRPM.mesh[])
        return nothing
    end
    for child in HRPM.children
        apply_mutating_function_recursively_to_HRPM_meshes!(f, child[])
    end
    return nothing
end
# Return (and cache in `HRPM.rect`) the axis-aligned bounding box of the HRPM.
# NOTE(review): the `!==` check assumes Rectangle_2D is an immutable bits-type
# so that `===` compares field values against the default — confirm in
# Rectangle_2D's definition.
function boundingbox(HRPM::HierarchicalRectangularlyPartitionedMesh)
    # Already computed and cached.
    if HRPM.rect !== Rectangle_2D()
        return HRPM.rect
    end
    if isassigned(HRPM.mesh)
        # Leaf: bounding box of the stored mesh.
        HRPM.rect = boundingbox(HRPM.mesh[], boundary_shape="Rectangle")
    elseif !isempty(HRPM.children)
        # Interior node: union of the children's bounding boxes.
        HRPM.rect = mapreduce(child -> boundingbox(child[]), union, HRPM.children)
    else
        @error "Something went wrong"
        return Rectangle_2D()
    end
    return HRPM.rect
end
# # Fill a statically sized, mutable vector (coord) with the necessary indices to navigate from the
# # root HRPM, through the children, to the base mesh, and return the face ID to which the point p
# # may be found in.
# # Example:
# # For an HRPM with 4 levels:
# # [1, 2, 1, 16]
# # denotes HRPM.children[1][].children[2][].children[1][].mesh[].faces[16] contains p
# # If the face is found, return true. Otherwise, return false
# function find_face!(p::Point_2D, coord::MVector{N, UInt32},
# HRPM::HierarchicalRectangularlyPartitionedMesh)
# in_rect = p ∈ HRPM.rect
# if !in_rect
# return false
# elseif in_rect && (0 < length(HRPM.children))
# for (i, child) in enumerate(HRPM.children)
# in_child = find_face!(p, coord, child[])
# if in_child
# coord[findfirst(x->x==0, coord)] = U(i)
# return true
# end
# end
# return false
# elseif in_rect && isassigned(HRPM.mesh)
# face = find_face(p, HRPM.mesh[]::UnstructuredMesh_2D{F, U})
# coord[findfirst(x->x==0, coord)] = face
# reverse!(coord)
# return face == 0 ? false : true
# end
# return false
# end
#
# # Get the intersection algorithm that will be used for l ∩ HRPM
# # Not type-stable
# function get_intersection_algorithm(HRPM::HierarchicalRectangularlyPartitionedMesh)
# if isassigned(HRPM.mesh)
# return get_intersection_algorithm(HRPM.mesh[])
# else
# return get_intersection_algorithm(HRPM.children[1][])
# end
# end
#
# # Check if the HRPM's mesh faces are materialized
# # Not type-stable
# function has_materialized_faces(HRPM::HierarchicalRectangularlyPartitionedMesh)
# if isassigned(HRPM.mesh)
# if length(HRPM.mesh[].materialized_faces) !== 0
# return true
# else
# return false
# end
# else
# return has_materialized_faces(HRPM.children[1][])
# end
# end
#
# # Intersection a line with the HRPM. Returns a vector of points, ordered by distance from
# # the line's start point
# # Not type-stable
# function intersect(l::LineSegment_2D{F},
# HRPM::HierarchicalRectangularlyPartitionedMesh{F, U}) where {F <: AbstractFloat,
# U <: Unsigned}
# # An array to hold all of the intersection points
# intersection_points = Point_2D{F}[]
# nchildren = length(HRPM.children)
# if 0 < (l ∩ HRPM.rect)[1]
# if isassigned(HRPM.mesh)
# append!(intersection_points, l ∩ HRPM.mesh[]::UnstructuredMesh_2D{F,U})
# elseif 0 < nchildren
# for ichild = 1:nchildren
# append!(intersection_points,
# l ∩ HRPM.children[ichild][]::HierarchicalRectangularlyPartitionedMesh{F, U})
# end
# return sort_points(l.points[1], intersection_points)
# end
# end
# return intersection_points
# end
#
# Return the height of the HRPM (number of edges between this node and the leaf)
#      A          H = 2
#     / \
#    /   \
#   B     C       H = 1
#  / \   / \
# D   E F   G     H = 0
# Not type-stable
function node_height(HRPM::HierarchicalRectangularlyPartitionedMesh)
    # A leaf has height 0.
    if isempty(HRPM.children)
        return 0
    end
    # Height is measured down the first child — assumes every branch has the
    # same depth (a balanced grid hierarchy).
    # (The previous `else` branch returning -100 was unreachable: a Vector's
    # length is either 0 or positive.)
    return node_height(HRPM.children[1][]) + 1
end
# Return the level of the HRPM node: 1 for the root, increasing by 1 per
# generation below it.
#      A          L = 1
#     / \
#    /   \
#   B     C       L = 2
#  / \   / \
# D   E F   G     L = 3
# Not type-stable
function node_level(HRPM::HierarchicalRectangularlyPartitionedMesh; current_level::Int64=1)
    # Walk up towards the root, counting one level per parent hop.
    isassigned(HRPM.parent) || return current_level
    return node_level(HRPM.parent[]; current_level = current_level + 1)
end
# Partition a mesh into an HRPM based upon the names of its face sets.
# The mesh must contain face sets of the form "GRID_LN_X_Y" where N, X, Y are
# integers: N is the level of the node, and X, Y are the indices of the mesh's
# location in a rectangular grid.
function partition_rectangularly(mesh::UnstructuredMesh_2D)
    @info "Converting $(typeof(mesh)) into HierarchicalRectangularlyPartitionedMesh"
    # All face-set names, the grid face-set names, and the deepest grid level.
    set_names, grid_names, max_level = process_partition_rectangularly_input(mesh)
    # Tree recording which grids contain which.
    root = create_HRPM_tree(mesh, grid_names, max_level)
    # Meshes for the finest (deepest-level) grids.
    leaf_meshes = create_HRPM_leaf_meshes(mesh, grid_names, max_level)
    # Assemble the hierarchy from the tree and the leaves.
    return create_HRPM(root, leaf_meshes)
end
# How to display the HRPM in the REPL
# NOTE(review): `relative_offset` is accepted but unused in this method —
# possibly intended for indenting nested nodes; confirm before removing.
function Base.show(io::IO, HRPM::HierarchicalRectangularlyPartitionedMesh; relative_offset::Int64 = 0)
    println(io, "HierarchicalRectangularlyPartitionedMesh")
    println(io, "  ├─ Name      : $(HRPM.name)")
    println(io, "  ├─ Bounding Box : $(HRPM.rect)")
    println(io, "  ├─ Mesh      : $(isassigned(HRPM.mesh))")
    println(io, "  ├─ Children    : $(length(HRPM.children))")
    # The final line reports the parent's name; the root prints "None".
    if isassigned(HRPM.parent)
        println(io, "  └─ Parent     : $(HRPM.parent[].name)")
    else
        println(io, "  └─ Parent     : None")
    end
end
# Functions used in partition_rectangularly
# ------------------------------------------------------------------------------------------------------
# Recursively mirror the grid tree below `tree` as HRPM children of `HRPM`,
# assigning the matching leaf mesh (by name) wherever one exists.
function attach_HRPM_children!(HRPM::HierarchicalRectangularlyPartitionedMesh,
                               tree::Tree,
                               leaf_meshes::Vector{<:UnstructuredMesh_2D})
    for subtree_ref in tree.children
        child_name = subtree_ref[].data
        # The constructor pushes the new node into HRPM.children via `parent`.
        child_HRPM = HierarchicalRectangularlyPartitionedMesh(name = child_name,
                                                              parent = Ref(HRPM))
        for candidate in leaf_meshes
            if child_name == candidate.name
                child_HRPM.mesh[] = candidate
            end
        end
        attach_HRPM_children!(child_HRPM, subtree_ref[], leaf_meshes)
    end
    return nothing
end
# Build the full HRPM from the grid tree and the leaf meshes.
function create_HRPM(tree::Tree, leaf_meshes::Vector{UnstructuredMesh_2D})
    # Top-down construction: the root node carries the original mesh's name.
    root = HierarchicalRectangularlyPartitionedMesh(name = tree.data)
    attach_HRPM_children!(root, tree, leaf_meshes)
    # Computing the root's bounding box fills in every node's rectangle.
    boundingbox(root)
    return root
end
# Construct the leaf meshes (the spatially smallest grids, at `max_level`).
function create_HRPM_leaf_meshes(mesh::UnstructuredMesh_2D, grid_names::Vector{String}, max_level::Int64)
    # Generate the leaf meshes: one submesh per deepest-level grid name.
    # Names look like "GRID_LN_X_Y"; character 7 is the level digit N
    # (single digit — levels >= 10 would need a different parse).
    leaf_meshes = UnstructuredMesh_2D[]
    for grid_name in grid_names
        if parse(Int64, grid_name[7]) == max_level
            push!(leaf_meshes, submesh(grid_name, mesh))
        end
    end
    # Strip the grid face sets from each leaf mesh.  Snapshot the keys with
    # collect(): the previous version deleted from the Dict while iterating
    # keys(...), i.e. mutated the collection being iterated.
    for leaf_mesh in leaf_meshes
        for set_name in collect(keys(leaf_mesh.face_sets))
            if occursin("GRID_", uppercase(set_name))
                delete!(leaf_mesh.face_sets, set_name)
            end
        end
    end
    return leaf_meshes
end
# Create a tree to store grid relationships.
# Level-by-level (breadth-first) construction: a grid's parent is the first
# level-(N-1) grid whose face set contains all of the grid's faces.
function create_HRPM_tree(mesh::UnstructuredMesh_2D, grid_names::Vector{String}, max_level::Int64)
    # The root carries the whole mesh's name.
    root = Tree( data = mesh.name )
    current_nodes = Tree[]
    next_nodes = Tree[]
    # new_grid_names tracks the names not yet attached to the tree.
    old_grid_names = copy(grid_names)
    new_grid_names = copy(grid_names)
    # Do first level
    for grid_name in old_grid_names
        # Names look like "GRID_LN_X_Y"; character 7 is the level digit N.
        grid_level = parse(Int64, grid_name[7])
        if grid_level === 1
            # Add to appropriate node (root)
            push!(next_nodes, Tree(data = grid_name; parent=Ref(root)))
            filter!(x->x ≠ grid_name, new_grid_names)
        end
    end
    # Do all other levels:
    for level in 2:max_level
        old_grid_names = copy(new_grid_names)
        current_nodes = next_nodes
        next_nodes = []
        for grid_name in old_grid_names
            grid_level = parse(Int64, grid_name[7])
            if grid_level == level
                # find the parent for this grid
                grid_faces = mesh.face_sets[grid_name]
                for node in current_nodes
                    node_faces = mesh.face_sets[node.data]
                    # A grid belongs under the node whose faces contain it.
                    if grid_faces ⊆ node_faces
                        push!(next_nodes, Tree(data = grid_name, parent=Ref(node)))
                        filter!(x->x ≠ grid_name, new_grid_names)
                        break
                    end
                end
            end
        end
    end
    return root
end
# Is this node the last child in its parent's list of children?
# A positive `relative_offset` asks the question of the nth parent instead.
function is_last_child(HRPM::HierarchicalRectangularlyPartitionedMesh; relative_offset::Int64=0)
    # The root has no parent and counts as a last child.
    isassigned(HRPM.parent) || return true
    if relative_offset > 0
        return is_last_child(HRPM.parent[]; relative_offset=relative_offset-1)
    end
    return HRPM.parent[].children[end][] == HRPM
end
# Extract all face-set names, the grid face-set names, and the maximum grid level.
function process_partition_rectangularly_input(mesh::UnstructuredMesh_2D)
    set_names = collect(keys(mesh.face_sets))
    # Grid sets are the face sets whose name contains "GRID_" (case-insensitive).
    grid_names = [name for name in set_names if occursin("GRID_", uppercase(name))]
    if isempty(grid_names)
        @error "No grid face sets in mesh"
    end
    # Deepest level referenced by any grid name ("GRID_LN_X_Y": char 7 is N).
    max_level = 0
    for grid_name in grid_names
        level = parse(Int64, grid_name[7])
        max_level = max(max_level, level)
    end
    return set_names, grid_names, max_level
end
# Plot
# -------------------------------------------------------------------------------------------------
# Only define the plotting recipe when visualization support is enabled.
# NOTE(review): `enable_visualization` is defined elsewhere in the package.
if enable_visualization
    # Recursively draw the line segments of every mesh stored in the HRPM.
    function linesegments!(HRPM::HierarchicalRectangularlyPartitionedMesh)
        apply_mutating_function_recursively_to_HRPM_meshes!(linesegments!, HRPM)
    end
end
|
{"hexsha": "9dd7ed9b20e917afefe5cf46ba7887ce5bde7d4e", "size": 14368, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/mesh/HierarchicalRectangularlyPartitionedMesh.jl", "max_stars_repo_name": "KyleVaughn/MOCNeutronTransport", "max_stars_repo_head_hexsha": "6de0f5987c2b37c3c3039d073b63c223ff6cd5f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-11-10T19:36:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-18T15:34:40.000Z", "max_issues_repo_path": "src/mesh/HierarchicalRectangularlyPartitionedMesh.jl", "max_issues_repo_name": "KyleVaughn/MOCNeutronTransport", "max_issues_repo_head_hexsha": "6de0f5987c2b37c3c3039d073b63c223ff6cd5f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2022-01-20T03:03:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T18:29:33.000Z", "max_forks_repo_path": "src/mesh/HierarchicalRectangularlyPartitionedMesh.jl", "max_forks_repo_name": "KyleVaughn/MOCNeutronTransport", "max_forks_repo_head_hexsha": "6de0f5987c2b37c3c3039d073b63c223ff6cd5f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1914357683, "max_line_length": 105, "alphanum_fraction": 0.6483157016, "num_tokens": 3660}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.