| repo (string, 2–99 chars) | file (string, 13–225 chars) | code (string, 0–18.3M chars) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/utils.py |
from typing import Optional, Tuple
import holidays
import pandas as pd
class HolidayUtil:
def __init__(self, country="US"):
try:
country, subdivision = self.convert_to_subdivision(country)
self.holidays = holidays.country_holidays(
country=country,
subdiv=subdivision,
)
except NotImplementedError:
available_countries = (
"https://github.com/dr-prodigy/python-holidays#available-countries"
)
error = "must be one of the available countries:\n%s" % available_countries
raise ValueError(error)
self.federal_holidays = getattr(holidays, country)(years=range(1950, 2075))
def to_df(self):
holidays_df = pd.DataFrame(
sorted(self.federal_holidays.items()),
columns=["holiday_date", "names"],
)
holidays_df.holiday_date = holidays_df.holiday_date.astype("datetime64[ns]")
return holidays_df
def convert_to_subdivision(self, country: str) -> Tuple[str, Optional[str]]:
"""Convert country to country + subdivision
Created in response to library changes that changed countries to subdivisions
Args:
country (str): Original country name
Returns:
Tuple[str, Optional[str]]: country, subdivision
"""
return {
"ENGLAND": ("GB", country),
"NORTHERNIRELAND": ("GB", country),
"PORTUGALEXT": ("PT", "Ext"),
"PTE": ("PT", "Ext"),
"SCOTLAND": ("GB", country),
"UK": ("GB", country),
"WALES": ("GB", country),
}.get(country.upper(), (country, None))
| 1,730 | 32.288462 | 88 | py |
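HolidayUtil ships without a doctest; here is a minimal usage sketch, assuming featuretools, pandas, and the `holidays` package are installed and the import path matches this repo layout:

```python
from featuretools.primitives.standard.transform.datetime.utils import HolidayUtil

# Build the utility for US holidays; to_df() yields one row per holiday
# occurrence for the years 1950-2074 generated in __init__.
util = HolidayUtil(country="US")
holidays_df = util.to_df()
print(holidays_df.columns.tolist())  # ['holiday_date', 'names']

# Legacy country aliases are remapped to (country, subdivision) pairs.
print(util.convert_to_subdivision("PTE"))  # ('PT', 'Ext')

# An unrecognized country raises ValueError pointing at the
# available-countries URL.
try:
    HolidayUtil(country="Atlantis")
except ValueError as err:
    print(err)
```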
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/is_working_hours.py |
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable, Datetime
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class IsWorkingHours(TransformPrimitive):
"""Determines if a datetime falls during working hours on a 24-hour clock. Can configure start_hour and end_hour.
Args:
start_hour (int): Start hour of workday. Must adhere to 24-hour clock. Default is 8 (8am).
end_hour (int): End hour of workday. Must adhere to 24-hour clock. Default is 18 (6pm).
Examples:
>>> import numpy as np
>>> from datetime import datetime
>>> dates = [datetime(2022, 6, 21, 16, 3, 3),
... datetime(2019, 1, 3, 4, 4, 4),
... datetime(2022, 1, 1, 12, 1, 2),
... np.nan]
>>> is_working_hour = IsWorkingHours()
>>> is_working_hour(dates).tolist()
[True, False, True, False]
>>> is_working_hour = IsWorkingHours(15, 17)
>>> is_working_hour(dates).tolist()
[True, False, False, False]
"""
name = "is_working_hours"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(logical_type=BooleanNullable)
compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
description_template = "whether {} falls during working hours"
def __init__(self, start_hour=8, end_hour=18):
self.start_hour = start_hour
self.end_hour = end_hour
def get_function(self):
def is_working_hours(vals):
return (vals.dt.hour >= self.start_hour) & (vals.dt.hour <= self.end_hour)
return is_working_hours
| 1,718 | 37.2 | 117 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/date_to_holiday.py |
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Categorical, Datetime
from featuretools.primitives.base import TransformPrimitive
from featuretools.primitives.standard.transform.datetime.utils import HolidayUtil
class DateToHoliday(TransformPrimitive):
"""Transforms time of an instance into the holiday name, if there is one.
Description:
If there is no holiday, it returns `NaN`. Currently only works for the
United States and Canada with dates between 1950 and 2074.
Args:
country (str): Country to use for determining Holidays.
Default is 'US'. Should be one of the available countries here:
https://github.com/dr-prodigy/python-holidays#available-countries
Examples:
>>> from datetime import datetime
>>> date_to_holiday = DateToHoliday()
>>> dates = pd.Series([datetime(2016, 1, 1),
... datetime(2016, 2, 27),
... datetime(2017, 5, 29, 10, 30, 5),
... datetime(2018, 7, 4)])
>>> date_to_holiday(dates).tolist()
["New Year's Day", nan, 'Memorial Day', 'Independence Day']
We can also change the country.
>>> date_to_holiday_canada = DateToHoliday(country='Canada')
>>> dates = pd.Series([datetime(2016, 7, 1),
... datetime(2016, 11, 15),
... datetime(2017, 12, 26),
... datetime(2018, 9, 3)])
>>> date_to_holiday_canada(dates).tolist()
['Canada Day', nan, 'Boxing Day', 'Labour Day']
"""
name = "date_to_holiday"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(logical_type=Categorical, semantic_tags={"category"})
def __init__(self, country="US"):
self.country = country
self.holidayUtil = HolidayUtil(country)
def get_function(self):
def date_to_holiday(x):
holiday_df = self.holidayUtil.to_df()
df = pd.DataFrame({"date": x})
df["date"] = df["date"].dt.date.astype("datetime64[ns]")
df = df.merge(
holiday_df,
how="left",
left_on="date",
right_on="holiday_date",
)
return df.names.values
return date_to_holiday
| 2,356 | 35.261538 | 84 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/is_month_start.py |
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable, Datetime
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class IsMonthStart(TransformPrimitive):
"""Determines the is_month_start attribute of a datetime column.
Examples:
>>> from datetime import datetime
>>> dates = [datetime(2019, 3, 1),
... datetime(2020, 2, 13),
... datetime(2020, 2, 29)]
>>> ims = IsMonthStart()
>>> ims(dates).tolist()
[True, False, False]
"""
name = "is_month_start"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(logical_type=BooleanNullable)
compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
description_template = "whether {} is at the start of a month"
def get_function(self):
def is_month_start(vals):
return vals.dt.is_month_start
return is_month_start
| 1,035 | 31.375 | 68 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/part_of_day.py |
import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Categorical, Datetime
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class PartOfDay(TransformPrimitive):
"""Determines the part of day of a datetime.
Description:
For a list of datetimes, determines the part of day the datetime
falls into, based on the hour.
If the hour falls from 4 to 5, the part of day is 'dawn'.
If the hour falls from 6 to 7, the part of day is 'early morning'.
If the hour falls from 8 to 10, the part of day is 'late morning'.
If the hour falls from 11 to 13, the part of day is 'noon'.
If the hour falls from 14 to 16, the part of day is 'afternoon'.
If the hour falls from 17 to 19, the part of day is 'evening'.
If the hour falls from 20 to 22, the part of day is 'night'.
If the hour is 23 or falls from 0 to 3, the part of day is 'midnight'.
Examples:
>>> from datetime import datetime
>>> dates = [datetime(2020, 1, 11, 6, 2, 1),
... datetime(2021, 3, 31, 4, 2, 1),
... datetime(2020, 3, 4, 9, 2, 1)]
>>> part_of_day = PartOfDay()
>>> part_of_day(dates).tolist()
['early morning', 'dawn', 'late morning']
"""
name = "part_of_day"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(logical_type=Categorical, semantic_tags={"category"})
compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
description_template = "the part of day {} falls in"
@staticmethod
def construct_replacement_dict():
tdict = dict()
tdict[pd.NaT] = np.nan
for hour in [4, 5]:
tdict[hour] = "dawn"
for hour in [6, 7]:
tdict[hour] = "early morning"
for hour in [8, 9, 10]:
tdict[hour] = "late morning"
for hour in [11, 12, 13]:
tdict[hour] = "noon"
for hour in [14, 15, 16]:
tdict[hour] = "afternoon"
for hour in [17, 18, 19]:
tdict[hour] = "evening"
for hour in [20, 21, 22]:
tdict[hour] = "night"
for hour in [23, 0, 1, 2, 3]:
tdict[hour] = "midnight"
return tdict
def get_function(self):
replacement_dict = self.construct_replacement_dict()
def part_of_day(vals):
ans = vals.dt.hour.replace(replacement_dict)
return ans
return part_of_day
| 2,609 | 35.760563 | 84 | py |
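A short sketch, assuming this repo layout, of the hour-to-label dictionary that `get_function` feeds to `Series.replace`:

```python
import pandas as pd

from featuretools.primitives.standard.transform.datetime.part_of_day import PartOfDay

# The static mapping covers every hour 0-23, plus pd.NaT -> np.nan.
mapping = PartOfDay.construct_replacement_dict()
hours = pd.Series([0, 4, 8, 12, 23])
print(hours.replace(mapping).tolist())
# ['midnight', 'dawn', 'late morning', 'noon', 'midnight']
```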
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/date_to_timezone.py |
import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Categorical, Datetime
from featuretools.primitives.base import TransformPrimitive
class DateToTimeZone(TransformPrimitive):
"""Determines the timezone of a datetime.
Description:
Given a list of datetimes, extract the timezone from each
one. Looks for the `tzinfo` attribute on `datetime.datetime`
objects. If the datetime has no timezone or the date is
missing, return `NaN`.
Examples:
>>> from datetime import datetime
>>> from pytz import timezone
>>> date_to_time_zone = DateToTimeZone()
>>> dates = [datetime(2010, 1, 1, tzinfo=timezone("America/Los_Angeles")),
... datetime(2010, 1, 1, tzinfo=timezone("America/New_York")),
... datetime(2010, 1, 1, tzinfo=timezone("America/Chicago")),
... datetime(2010, 1, 1)]
>>> date_to_time_zone(dates).tolist()
['America/Los_Angeles', 'America/New_York', 'America/Chicago', nan]
"""
name = "date_to_time_zone"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(logical_type=Categorical, semantic_tags={"category"})
def get_function(self):
def date_to_time_zone(x):
return x.apply(lambda dt: dt.tzinfo.zone if dt.tzinfo else np.nan)
return date_to_time_zone
| 1,428 | 36.605263 | 84 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/days_in_month.py |
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Ordinal
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class DaysInMonth(TransformPrimitive):
"""Determines the number of days in the month of given datetime.
Examples:
>>> from datetime import datetime
>>> dates = [datetime(2019, 12, 1),
... datetime(2019, 1, 3),
... datetime(2020, 2, 1)]
>>> days_in_month = DaysInMonth()
>>> days_in_month(dates).tolist()
[31, 31, 29]
"""
name = "days_in_month"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(
logical_type=Ordinal(order=list(range(1, 32))),
semantic_tags={"category"},
)
compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
description_template = "the days in the month of {}"
def get_function(self):
def days_in_month(vals):
return vals.dt.daysinmonth
return days_in_month
| 1,089 | 30.142857 | 68 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/is_federal_holiday.py |
import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable, Datetime
from featuretools.primitives.base import TransformPrimitive
from featuretools.primitives.standard.transform.datetime.utils import HolidayUtil
class IsFederalHoliday(TransformPrimitive):
"""Determines if a given datetime is a federal holiday.
Description:
This primitive currently only works for the United States
and Canada with dates between 1950 and 2074.
Args:
country (str): Country to use for determining Holidays.
Default is 'US'. Should be one of the available countries here:
https://github.com/dr-prodigy/python-holidays#available-countries
Examples:
>>> from datetime import datetime
>>> is_federal_holiday = IsFederalHoliday(country="US")
>>> is_federal_holiday([
... datetime(2019, 7, 4, 10, 0, 30),
... datetime(2019, 2, 26)]).tolist()
[True, False]
"""
name = "is_federal_holiday"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(logical_type=BooleanNullable)
def __init__(self, country="US"):
self.country = country
self.holidayUtil = HolidayUtil(country)
def get_function(self):
def is_federal_holiday(x):
holidays_df = self.holidayUtil.to_df()
is_holiday = x.dt.normalize().isin(holidays_df.holiday_date)
if x.isnull().values.any():
is_holiday = is_holiday.astype("object")
is_holiday[x.isnull()] = np.nan
return is_holiday.values
return is_federal_holiday
| 1,697 | 34.375 | 81 | py |
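A sketch, assuming this repo layout, of the null handling in `get_function`: timestamps are normalized to midnight before the lookup, and missing inputs come back as NaN rather than False:

```python
import pandas as pd

from featuretools.primitives.standard.transform.datetime.is_federal_holiday import (
    IsFederalHoliday,
)

dates = pd.Series([pd.Timestamp("2019-07-04 10:00:30"), pd.NaT])
is_federal_holiday = IsFederalHoliday(country="US")
# The result dtype is promoted to object so NaN can coexist with booleans.
print(is_federal_holiday(dates).tolist())  # [True, nan]
```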
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/day_of_year.py |
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Ordinal
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class DayOfYear(TransformPrimitive):
"""Determines the ordinal day of the year from the given datetime
Description:
For a list of dates, return the ordinal day of the year
from the given datetime.
Examples:
>>> from datetime import datetime
>>> dates = [datetime(2019, 1, 1),
... datetime(2020, 12, 31),
... datetime(2020, 2, 28)]
>>> dayOfYear = DayOfYear()
>>> dayOfYear(dates).tolist()
[1, 366, 59]
"""
name = "day_of_year"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(
logical_type=Ordinal(order=list(range(1, 367))),
semantic_tags={"category"},
)
compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
description_template = "the day of year from {}"
def get_function(self):
def dayOfYear(vals):
return vals.dt.dayofyear
return dayOfYear
| 1,180 | 29.282051 | 69 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/weekday.py |
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Ordinal
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class Weekday(TransformPrimitive):
"""Determines the day of the week from a datetime.
Description:
Returns the day of the week from a datetime value. Weeks
start on Monday (day 0) and run through Sunday (day 6).
Examples:
>>> from datetime import datetime
>>> dates = [datetime(2019, 3, 1),
... datetime(2019, 6, 17, 11, 10, 50),
... datetime(2019, 11, 30, 19, 45, 15)]
>>> weekday = Weekday()
>>> weekday(dates).tolist()
[4, 0, 5]
"""
name = "weekday"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(
logical_type=Ordinal(order=list(range(7))),
semantic_tags={"category"},
)
compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
description_template = "the day of the week of {}"
def get_function(self):
def weekday(vals):
return vals.dt.weekday
return weekday
| 1,197 | 29.717949 | 65 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/is_quarter_end.py |
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable, Datetime
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class IsQuarterEnd(TransformPrimitive):
"""Determines the is_quarter_end attribute of a datetime column.
Examples:
>>> from datetime import datetime
>>> iqe = IsQuarterEnd()
>>> dates = [datetime(2020, 3, 31),
... datetime(2020, 1, 1)]
>>> iqe(dates).tolist()
[True, False]
"""
name = "is_quarter_end"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(logical_type=BooleanNullable)
compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
description_template = "whether {} is a quarter end"
def get_function(self):
def is_quarter_end(vals):
return vals.dt.is_quarter_end
return is_quarter_end
| 974 | 30.451613 | 68 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/is_year_start.py |
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable, Datetime
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class IsYearStart(TransformPrimitive):
"""Determines if a date falls on the start of a year.
Examples:
>>> import numpy as np
>>> from datetime import datetime
>>> dates = [datetime(2019, 12, 31),
... datetime(2019, 1, 1),
... datetime(2019, 11, 30),
... np.nan]
>>> is_year_start = IsYearStart()
>>> is_year_start(dates).tolist()
[False, True, False, False]
"""
name = "is_year_start"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(logical_type=BooleanNullable)
compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
description_template = "whether {} occurred on the start of a year"
def get_function(self):
def is_year_start(vals):
return vals.dt.is_year_start
return is_year_start
| 1,112 | 31.735294 | 71 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/hour.py |
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Ordinal
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class Hour(TransformPrimitive):
"""Determines the hour value of a datetime.
Examples:
>>> from datetime import datetime
>>> dates = [datetime(2019, 3, 1),
... datetime(2019, 3, 3, 11, 10, 50),
... datetime(2019, 3, 31, 19, 45, 15)]
>>> hour = Hour()
>>> hour(dates).tolist()
[0, 11, 19]
"""
name = "hour"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(
logical_type=Ordinal(order=list(range(24))),
semantic_tags={"category"},
)
compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
description_template = "the hour value of {}"
def get_function(self):
def hour(vals):
return vals.dt.hour
return hour
| 1,015 | 28.028571 | 65 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/day.py |
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Ordinal
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class Day(TransformPrimitive):
"""Determines the day of the month from a datetime.
Examples:
>>> from datetime import datetime
>>> dates = [datetime(2019, 3, 1),
... datetime(2019, 3, 3),
... datetime(2019, 3, 31)]
>>> day = Day()
>>> day(dates).tolist()
[1, 3, 31]
"""
name = "day"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(
logical_type=Ordinal(order=list(range(1, 32))),
semantic_tags={"category"},
)
compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
description_template = "the day of the month of {}"
def get_function(self):
def day(vals):
return vals.dt.day
return day
| 999 | 27.571429 | 65 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/is_quarter_start.py |
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable, Datetime
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class IsQuarterStart(TransformPrimitive):
"""Determines the is_quarter_start attribute of a datetime column.
Examples:
>>> from datetime import datetime
>>> iqs = IsQuarterStart()
>>> dates = [datetime(2020, 3, 31),
... datetime(2020, 1, 1)]
>>> iqs(dates).tolist()
[False, True]
"""
name = "is_quarter_start"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(logical_type=BooleanNullable)
compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
description_template = "whether {} is a quarter start"
def get_function(self):
def is_quarter_start(vals):
return vals.dt.is_quarter_start
return is_quarter_start
| 990 | 30.967742 | 70 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/__init__.py |
from featuretools.primitives.standard.transform.datetime.age import Age
from featuretools.primitives.standard.transform.datetime.date_to_holiday import (
DateToHoliday,
)
from featuretools.primitives.standard.transform.datetime.date_to_timezone import (
DateToTimeZone,
)
from featuretools.primitives.standard.transform.datetime.day import Day
from featuretools.primitives.standard.transform.datetime.day_of_year import DayOfYear
from featuretools.primitives.standard.transform.datetime.days_in_month import (
DaysInMonth,
)
from featuretools.primitives.standard.transform.datetime.diff_datetime import (
DiffDatetime,
)
from featuretools.primitives.standard.transform.datetime.distance_to_holiday import (
DistanceToHoliday,
)
from featuretools.primitives.standard.transform.datetime.hour import Hour
from featuretools.primitives.standard.transform.datetime.is_first_week_of_month import (
IsFirstWeekOfMonth,
)
from featuretools.primitives.standard.transform.datetime.is_federal_holiday import (
IsFederalHoliday,
)
from featuretools.primitives.standard.transform.datetime.is_leap_year import IsLeapYear
from featuretools.primitives.standard.transform.datetime.is_lunch_time import (
IsLunchTime,
)
from featuretools.primitives.standard.transform.datetime.is_month_end import IsMonthEnd
from featuretools.primitives.standard.transform.datetime.is_month_start import (
IsMonthStart,
)
from featuretools.primitives.standard.transform.datetime.is_quarter_end import (
IsQuarterEnd,
)
from featuretools.primitives.standard.transform.datetime.is_quarter_start import (
IsQuarterStart,
)
from featuretools.primitives.standard.transform.datetime.is_weekend import IsWeekend
from featuretools.primitives.standard.transform.datetime.is_working_hours import (
IsWorkingHours,
)
from featuretools.primitives.standard.transform.datetime.is_year_end import IsYearEnd
from featuretools.primitives.standard.transform.datetime.is_year_start import (
IsYearStart,
)
from featuretools.primitives.standard.transform.datetime.minute import Minute
from featuretools.primitives.standard.transform.datetime.month import Month
from featuretools.primitives.standard.transform.datetime.part_of_day import PartOfDay
from featuretools.primitives.standard.transform.datetime.quarter import Quarter
from featuretools.primitives.standard.transform.datetime.season import Season
from featuretools.primitives.standard.transform.datetime.second import Second
from featuretools.primitives.standard.transform.datetime.time_since import TimeSince
from featuretools.primitives.standard.transform.datetime.time_since_previous import (
TimeSincePrevious,
)
from featuretools.primitives.standard.transform.datetime.week import Week
from featuretools.primitives.standard.transform.datetime.weekday import Weekday
from featuretools.primitives.standard.transform.datetime.year import Year
| 2,903 | 46.606557 | 88 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/time_since.py |
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils import convert_time_units
from featuretools.utils.gen_utils import Library
class TimeSince(TransformPrimitive):
"""Calculates time from a value to a specified cutoff datetime.
Args:
unit (str): Defines the unit of time to count from.
Defaults to Seconds. Acceptable values:
years, months, days, hours, minutes, seconds, milliseconds, nanoseconds
Examples:
>>> from datetime import datetime
>>> time_since = TimeSince()
>>> times = [datetime(2019, 3, 1, 0, 0, 0, 1),
... datetime(2019, 3, 1, 0, 0, 1, 0),
... datetime(2019, 3, 1, 0, 2, 0, 0)]
>>> cutoff_time = datetime(2019, 3, 1, 0, 0, 0, 0)
>>> values = time_since(times, time=cutoff_time)
>>> list(map(int, values))
[0, -1, -120]
Change output to nanoseconds
>>> from datetime import datetime
>>> time_since_nano = TimeSince(unit='nanoseconds')
>>> times = [datetime(2019, 3, 1, 0, 0, 0, 1),
... datetime(2019, 3, 1, 0, 0, 1, 0),
... datetime(2019, 3, 1, 0, 2, 0, 0)]
>>> cutoff_time = datetime(2019, 3, 1, 0, 0, 0, 0)
>>> values = time_since_nano(times, time=cutoff_time)
>>> list(map(lambda x: int(round(x)), values))
[-1000, -1000000000, -120000000000]
"""
name = "time_since"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(semantic_tags={"numeric"})
uses_calc_time = True
compatibility = [Library.PANDAS, Library.DASK]
description_template = "the time from {} to the cutoff time"
def __init__(self, unit="seconds"):
self.unit = unit.lower()
def get_function(self):
def pd_time_since(array, time):
return convert_time_units((time - array).dt.total_seconds(), self.unit)
return pd_time_since
| 2,070 | 35.982143 | 83 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/is_month_end.py |
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable, Datetime
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class IsMonthEnd(TransformPrimitive):
"""Determines the is_month_end attribute of a datetime column.
Examples:
>>> from datetime import datetime
>>> dates = [datetime(2019, 3, 1),
... datetime(2021, 2, 28),
... datetime(2020, 2, 29)]
>>> ime = IsMonthEnd()
>>> ime(dates).tolist()
[False, True, True]
"""
name = "is_month_end"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(logical_type=BooleanNullable)
compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
description_template = "whether {} is at the end of a month"
def get_function(self):
def is_month_end(vals):
return vals.dt.is_month_end
return is_month_end
| 1,018 | 30.84375 | 66 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/diff_datetime.py |
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Timedelta
from featuretools.primitives.standard.transform.numeric.diff import Diff
class DiffDatetime(Diff):
"""Computes the timedelta between a datetime in a list and the
previous datetime in that list.
Args:
periods (int): The number of periods by which to shift the index row.
Default is 0. Periods correspond to rows.
Description:
Given a list of datetimes, compute the difference from the previous
item in the list. The result for the first element of the list will
always be `NaT`.
Examples:
>>> from datetime import datetime
>>> dt_values = [datetime(2019, 3, 1), datetime(2019, 6, 30), datetime(2019, 11, 17), datetime(2020, 1, 30), datetime(2020, 3, 11)]
>>> diff_dt = DiffDatetime()
>>> diff_dt(dt_values).tolist()
[NaT, Timedelta('121 days 00:00:00'), Timedelta('140 days 00:00:00'), Timedelta('74 days 00:00:00'), Timedelta('41 days 00:00:00')]
You can specify the number of periods to shift the values
>>> diff_dt_periods = DiffDatetime(periods = 1)
>>> diff_dt_periods(dt_values).tolist()
[NaT, NaT, Timedelta('121 days 00:00:00'), Timedelta('140 days 00:00:00'), Timedelta('74 days 00:00:00')]
"""
name = "diff_datetime"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(logical_type=Timedelta)
uses_full_dataframe = True
description_template = "the difference from the previous value of {}"
def __init__(self, periods=0):
super().__init__(periods)
| 1,668 | 38.738095 | 139 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/week.py |
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Ordinal
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class Week(TransformPrimitive):
"""Determines the week of the year from a datetime.
Description:
Returns the ISO week of the year from a datetime value. Under ISO
8601 numbering, weeks start on Monday and week 1 is the week that
contains the year's first Thursday.
Examples:
>>> from datetime import datetime
>>> dates = [datetime(2019, 1, 3),
... datetime(2019, 6, 17, 11, 10, 50),
... datetime(2019, 11, 30, 19, 45, 15)]
>>> week = Week()
>>> week(dates).tolist()
[1, 25, 48]
"""
name = "week"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(
logical_type=Ordinal(order=list(range(1, 54))),
semantic_tags={"category"},
)
compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
description_template = "the week of the year of {}"
def get_function(self):
def week(vals):
if hasattr(vals.dt, "isocalendar"):
return vals.dt.isocalendar().week
else:
return vals.dt.week
return week
| 1,336 | 30.093023 | 74 | py |
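The `isocalendar` branch exists because the `.dt.week` accessor was deprecated and later removed from pandas; a quick check of the modern path, which matches the doctest above:

```python
import pandas as pd

dates = pd.Series(pd.to_datetime(["2019-01-03", "2019-06-17", "2019-11-30"]))
# pandas >= 1.1 exposes ISO week numbers via isocalendar().
print(dates.dt.isocalendar().week.tolist())  # [1, 25, 48]
```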
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/is_leap_year.py |
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable, Datetime
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class IsLeapYear(TransformPrimitive):
"""Determines the is_leap_year attribute of a datetime column.
Examples:
>>> from datetime import datetime
>>> dates = [datetime(2019, 3, 1),
... datetime(2020, 3, 3, 11, 10, 50),
... datetime(2021, 3, 31, 19, 45, 15)]
>>> ily = IsLeapYear()
>>> ily(dates).tolist()
[False, True, False]
"""
name = "is_leap_year"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(logical_type=BooleanNullable)
compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
description_template = "whether the year of {} is a leap year"
def get_function(self):
def is_leap_year(vals):
return vals.dt.is_leap_year
return is_leap_year
| 1,044 | 31.65625 | 66 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/time_since_previous.py |
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils import convert_time_units
class TimeSincePrevious(TransformPrimitive):
"""Computes the time since the previous entry in a list.
Args:
unit (str): Defines the unit of time to count from.
Defaults to Seconds. Acceptable values:
years, months, days, hours, minutes, seconds, milliseconds, nanoseconds
Description:
Given a list of datetimes, compute the time in seconds elapsed since
the previous item in the list. The result for the first item in the
list will always be `NaN`.
Examples:
>>> from datetime import datetime
>>> time_since_previous = TimeSincePrevious()
>>> dates = [datetime(2019, 3, 1, 0, 0, 0),
... datetime(2019, 3, 1, 0, 2, 0),
... datetime(2019, 3, 1, 0, 3, 0),
... datetime(2019, 3, 1, 0, 2, 30),
... datetime(2019, 3, 1, 0, 10, 0)]
>>> time_since_previous(dates).tolist()
[nan, 120.0, 60.0, -30.0, 450.0]
"""
name = "time_since_previous"
input_types = [ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"})]
return_type = ColumnSchema(semantic_tags={"numeric"})
description_template = "the time since the previous instance of {}"
def __init__(self, unit="seconds"):
self.unit = unit.lower()
def get_function(self):
def pd_diff(values):
return convert_time_units(
values.diff().apply(lambda x: x.total_seconds()),
self.unit,
)
return pd_diff
| 1,746 | 34.653061 | 85 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/season.py |
from datetime import date
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Categorical, Datetime
from featuretools.primitives.base import TransformPrimitive
class Season(TransformPrimitive):
"""Determines the season of a given datetime.
Returns winter, spring, summer, or fall.
This only works for the northern hemisphere.
Description:
Given a list of datetimes, return the season of each one
(`winter`, `spring`, `summer`, or `fall`).
Examples:
>>> from datetime import datetime
>>> times = [datetime(2019, 1, 1),
... datetime(2019, 4, 15),
... datetime(2019, 7, 20),
... datetime(2019, 12, 30)]
>>> season = Season()
>>> season(times).tolist()
['winter', 'spring', 'summer', 'winter']
"""
name = "season"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(logical_type=Categorical, semantic_tags={"category"})
def get_function(self):
def season(x):
# https://stackoverflow.com/a/28688724/2512385
Y = 2000 # dummy leap year to allow input X-02-29 (leap day)
seasons = [
("winter", (date(Y, 1, 1), date(Y, 3, 20))),
("spring", (date(Y, 3, 21), date(Y, 6, 20))),
("summer", (date(Y, 6, 21), date(Y, 9, 22))),
("fall", (date(Y, 9, 23), date(Y, 12, 20))),
("winter", (date(Y, 12, 21), date(Y, 12, 31))),
]
x = x.apply(lambda x: x.replace(year=2000))
def get_season(dt):
for season, (start, end) in seasons:
if not pd.isna(dt) and start <= dt.date() <= end:
return season
return pd.NA
new = x.apply(get_season).astype(dtype="string")
return new
return season
| 1,968 | 33.54386 | 84 | py |
| featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/distance_to_holiday.py |
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime
from featuretools.primitives.base import TransformPrimitive
from featuretools.primitives.standard.transform.datetime.utils import HolidayUtil
class DistanceToHoliday(TransformPrimitive):
"""Computes the number of days before or after a given holiday.
Description:
For a list of dates, return the distance from the nearest
occurrence of a chosen holiday. The distance is returned in
days. If the closest occurrence is prior to the date given,
return a negative number.
If a date is missing, return `NaN`.
Currently only works with dates between 1950 and 2074.
Args:
holiday (str): Name of the holiday. Defaults to New Year's Day.
country (str): Specifies which country's calendar to use for the
given holiday. Default is `US`.
Examples:
>>> from datetime import datetime
>>> distance_to_holiday = DistanceToHoliday("New Year's Day")
>>> dates = [datetime(2010, 1, 1),
... datetime(2012, 5, 31),
... datetime(2017, 7, 31),
... datetime(2020, 12, 31)]
>>> distance_to_holiday(dates).tolist()
[0, -151, 154, 1]
We can also control the country in which we're searching for
a holiday.
>>> distance_to_holiday = DistanceToHoliday("Victoria Day", country='Canada')
>>> dates = [datetime(2010, 1, 1),
... datetime(2012, 5, 31),
... datetime(2017, 7, 31),
... datetime(2020, 12, 31)]
>>> distance_to_holiday(dates).tolist()
[143, -10, -70, 144]
"""
name = "distance_to_holiday"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(semantic_tags={"numeric"})
default_value = 0
def __init__(self, holiday="New Year's Day", country="US"):
self.country = country
self.holiday = holiday
self.holidayUtil = HolidayUtil(country)
available_holidays = list(set(self.holidayUtil.federal_holidays.values()))
if self.holiday not in available_holidays:
error = "must be one of the available holidays:\n%s" % available_holidays
raise ValueError(error)
def get_function(self):
def distance_to_holiday(x):
holiday_df = self.holidayUtil.to_df()
holiday_df = holiday_df[holiday_df.names == self.holiday]
df = pd.DataFrame({"date": x})
df["x_index"] = df.index # store original index as a column
df = df.dropna()
df = df.sort_values("date")
df["date"] = df["date"].dt.date.astype("datetime64[ns]")
matches = pd.merge_asof(
df,
holiday_df,
left_on="date",
right_on="holiday_date",
direction="nearest",
tolerance=pd.Timedelta("365d"),
)
matches = matches.set_index("x_index")
matches["days_diff"] = (matches.holiday_date - matches.date).dt.days
return matches.days_diff.reindex_like(x)
return distance_to_holiday
| 3,274 | 35.388889 | 85 | py |
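The heavy lifting in `get_function` is `pd.merge_asof` with `direction="nearest"`. A self-contained sketch of just that step, using hypothetical holiday dates rather than the full primitive:

```python
import pandas as pd

# Both frames must be sorted on their join keys for merge_asof.
df = pd.DataFrame({"date": pd.to_datetime(["2010-01-01", "2012-05-31"])})
holiday_df = pd.DataFrame(
    {"holiday_date": pd.to_datetime(["2010-01-01", "2012-01-01", "2013-01-01"])},
)
matches = pd.merge_asof(
    df,
    holiday_df,
    left_on="date",
    right_on="holiday_date",
    direction="nearest",             # closest occurrence before or after
    tolerance=pd.Timedelta("365d"),  # give up beyond a year
)
# Negative distances mean the nearest occurrence was before the date.
print((matches.holiday_date - matches.date).dt.days.tolist())  # [0, -151]
```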
| featuretools | featuretools-main/featuretools/primitives/base/transform_primitive_base.py |
from featuretools.primitives.base.primitive_base import PrimitiveBase
class TransformPrimitive(PrimitiveBase):
"""Feature for dataframe that is a based off one or more other features
in that dataframe."""
# (bool) If True, feature function depends on all values of dataframe
# (and will receive these values as input, regardless of specified instance ids)
uses_full_dataframe = False
def generate_name(self, base_feature_names):
return "%s(%s%s)" % (
self.name.upper(),
", ".join(base_feature_names),
self.get_args_string(),
)
def generate_names(self, base_feature_names):
n = self.number_output_features
base_name = self.generate_name(base_feature_names)
return [base_name + "[%s]" % i for i in range(n)]
| 817 | 34.565217 | 86 | py |
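`generate_name` produces the feature-matrix column name. A sketch, assuming this repo layout, using a primitive from above whose arguments differ from their defaults:

```python
from featuretools.primitives.standard.transform.datetime.is_working_hours import (
    IsWorkingHours,
)

prim = IsWorkingHours(start_hour=9, end_hour=17)
# Non-default arguments are appended by get_args_string().
print(prim.generate_name(["timestamp"]))
# IS_WORKING_HOURS(timestamp, start_hour=9, end_hour=17)
```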
| featuretools | featuretools-main/featuretools/primitives/base/aggregation_primitive_base.py |
from featuretools.primitives.base.primitive_base import PrimitiveBase
class AggregationPrimitive(PrimitiveBase):
def generate_name(
self,
base_feature_names,
relationship_path_name,
parent_dataframe_name,
where_str,
use_prev_str,
):
base_features_str = ", ".join(base_feature_names)
return "%s(%s.%s%s%s%s)" % (
self.name.upper(),
relationship_path_name,
base_features_str,
where_str,
use_prev_str,
self.get_args_string(),
)
def generate_names(
self,
base_feature_names,
relationship_path_name,
parent_dataframe_name,
where_str,
use_prev_str,
):
n = self.number_output_features
base_name = self.generate_name(
base_feature_names,
relationship_path_name,
parent_dataframe_name,
where_str,
use_prev_str,
)
return [base_name + "[%s]" % i for i in range(n)]
| 1,057 | 25.45 | 69 | py |
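A sketch of the aggregation naming scheme, assuming the standard `Sum` primitive is importable from `featuretools.primitives`; the relationship path and base feature are joined with a dot inside the parentheses:

```python
from featuretools.primitives import Sum

name = Sum().generate_name(
    base_feature_names=["amount"],
    relationship_path_name="orders",
    parent_dataframe_name="customers",  # not used by the name template itself
    where_str="",
    use_prev_str="",
)
print(name)  # SUM(orders.amount)
```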
| featuretools | featuretools-main/featuretools/primitives/base/primitive_base.py |
import os
from inspect import signature
import numpy as np
import pandas as pd
from featuretools import config
from featuretools.utils.description_utils import convert_to_nth
from featuretools.utils.gen_utils import Library
class PrimitiveBase(object):
"""Base class for all primitives."""
#: (str): Name of the primitive
name = None
#: (list): woodwork.ColumnSchema types of inputs
input_types = None
#: (woodwork.ColumnSchema): ColumnSchema type of return
return_type = None
#: Default value this feature returns if no data found. Defaults to np.nan
default_value = np.nan
#: (bool): True if feature needs to know what the current calculation time
# is (provided to computational backend as "time_last")
uses_calc_time = False
#: (int): Maximum number of features in the largest chain proceeding
# downward from this feature's base features.
max_stack_depth = None
#: (int): Number of columns in feature matrix associated with this feature
number_output_features = 1
# whitelist of primitives that can have this primitive in input_types
base_of = None
# blacklist of primitives that can have this primitive in input_types
base_of_exclude = None
# whitelist of primitives that can be in input_types
stack_on = None
# blacklist of primitives that can be in input_types
stack_on_exclude = None
# determines if primitive can be in input_types for self
stack_on_self = True
# (bool) If True will only make one feature per unique set of base features
commutative = False
#: (list): Additional compatible libraries
compatibility = [Library.PANDAS]
#: (str, list[str]): description template of the primitive. Input column
# descriptions are passed as positional arguments to the template. Slice
# number (if present) in "nth" form is passed to the template via the
# `nth_slice` keyword argument. Multi-output primitives can use a list to
# differentiate between the base description and a slice description.
description_template = None
series_library = Library.PANDAS
def __init__(self):
pass
def __call__(self, *args, **kwargs):
series_args = [pd.Series(arg) for arg in args]
try:
return self._method(*series_args, **kwargs)
except AttributeError:
self._method = self.get_function()
return self._method(*series_args, **kwargs)
def __lt__(self, other):
return (self.name + self.get_args_string()) < (
other.name + other.get_args_string()
)
def generate_name(self):
raise NotImplementedError("Subclass must implement")
def generate_names(self):
raise NotImplementedError("Subclass must implement")
def get_function(self):
raise NotImplementedError("Subclass must implement")
def get_filepath(self, filename):
return os.path.join(config.get("primitive_data_folder"), filename)
def get_args_string(self):
strings = []
for name, value in self.get_arguments():
# format arg to string
string = "{}={}".format(name, str(value))
strings.append(string)
if len(strings) == 0:
return ""
string = ", ".join(strings)
string = ", " + string
return string
def get_arguments(self):
values = []
args = signature(self.__class__).parameters.items()
for name, arg in args:
# assert that arg is attribute of primitive
error = '"{}" must be attribute of {}'
assert hasattr(self, name), error.format(name, self.__class__.__name__)
value = getattr(self, name)
# check if args are the same type
if isinstance(value, type(arg.default)):
# skip if default value
if arg.default == value:
continue
values.append((name, value))
return values
def get_description(
self,
input_column_descriptions,
slice_num=None,
template_override=None,
):
template = template_override or self.description_template
if template:
if isinstance(template, list):
if slice_num is not None:
slice_index = slice_num + 1
if slice_index < len(template):
return template[slice_index].format(
*input_column_descriptions,
nth_slice=convert_to_nth(slice_index),
)
else:
if len(template) > 2:
raise IndexError("Slice out of range of template")
return template[1].format(
*input_column_descriptions,
nth_slice=convert_to_nth(slice_index),
)
else:
template = template[0]
return template.format(*input_column_descriptions)
# generic case:
name = self.name.upper() if self.name is not None else type(self).__name__
if slice_num is not None:
nth_slice = convert_to_nth(slice_num + 1)
description = "the {} output from applying {} to {}".format(
nth_slice,
name,
", ".join(input_column_descriptions),
)
else:
description = "the result of applying {} to {}".format(
name,
", ".join(input_column_descriptions),
)
return description
@staticmethod
def flatten_nested_input_types(input_types):
"""Flattens nested column schema inputs into a single list."""
if isinstance(input_types[0], list):
input_types = [
sub_input for input_obj in input_types for sub_input in input_obj
]
return input_types
| 6,014 | 35.23494 | 83 | py |
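A sketch, assuming this repo layout, of the lazy `__call__` path: positional arguments are coerced to `pd.Series`, and the function from `get_function()` is built once and cached on `self._method`:

```python
from datetime import datetime

from featuretools.primitives.standard.transform.datetime.day import Day

day = Day()
# The first call triggers get_function() via the AttributeError fallback;
# subsequent calls reuse the cached self._method.
print(day([datetime(2019, 3, 1), datetime(2019, 3, 31)]).tolist())  # [1, 31]
```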
| featuretools | featuretools-main/featuretools/primitives/base/__init__.py |
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.primitives.base.primitive_base import PrimitiveBase
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
| 244 | 60.25 | 88 | py |
| featuretools | featuretools-main/featuretools/selection/api.py |
# flake8: noqa
from featuretools.selection.selection import *
| 62 | 20 | 46 | py |
| featuretools | featuretools-main/featuretools/selection/selection.py |
import pandas as pd
from woodwork.logical_types import Boolean, BooleanNullable
def remove_low_information_features(feature_matrix, features=None):
"""Select features that have at least 2 unique values and that are not all null
Args:
feature_matrix (:class:`pd.DataFrame`): DataFrame whose columns are feature names and rows are instances
features (list[:class:`featuretools.FeatureBase`] or list[str], optional): List of features to select
Returns:
(feature_matrix, features)
"""
keep = [
c
for c in feature_matrix
if (
feature_matrix[c].nunique(dropna=False) > 1
and feature_matrix[c].dropna().shape[0] > 0
)
]
feature_matrix = feature_matrix[keep]
if features is not None:
features = [f for f in features if f.get_name() in feature_matrix.columns]
return feature_matrix, features
return feature_matrix
def remove_highly_null_features(feature_matrix, features=None, pct_null_threshold=0.95):
"""
Removes columns from a feature matrix that have higher than a set threshold
of null values.
Args:
feature_matrix (:class:`pd.DataFrame`): DataFrame whose columns are feature names and rows are instances.
features (list[:class:`featuretools.FeatureBase`] or list[str], optional): List of features to select.
pct_null_threshold (float): If the percentage of NaN values in an input feature exceeds this amount,
that feature will be considered highly-null. Defaults to 0.95.
Returns:
pd.DataFrame, list[:class:`.FeatureBase`]:
The feature matrix and the list of generated feature definitions. Matches dfs output.
If no feature list is provided as input, the feature list will not be returned.
"""
if pct_null_threshold < 0 or pct_null_threshold > 1:
raise ValueError(
"pct_null_threshold must be a float between 0 and 1, inclusive.",
)
percent_null_by_col = (feature_matrix.isnull().mean()).to_dict()
if pct_null_threshold == 0.0:
keep = [
f_name
for f_name, pct_null in percent_null_by_col.items()
if pct_null <= pct_null_threshold
]
else:
keep = [
f_name
for f_name, pct_null in percent_null_by_col.items()
if pct_null < pct_null_threshold
]
return _apply_feature_selection(keep, feature_matrix, features)
def remove_single_value_features(
feature_matrix,
features=None,
count_nan_as_value=False,
):
"""Removes columns in feature matrix where all the values are the same.
Args:
feature_matrix (:class:`pd.DataFrame`): DataFrame whose columns are feature names and rows are instances.
features (list[:class:`featuretools.FeatureBase`] or list[str], optional): List of features to select.
count_nan_as_value (bool): If True, missing values will be counted as their own unique value.
If set to False, a feature that has one unique value and all other
data missing will be removed from the feature matrix. Defaults to False.
Returns:
pd.DataFrame, list[:class:`.FeatureBase`]:
The feature matrix and the list of generated feature definitions.
Matches dfs output.
If no feature list is provided as input, the feature list will not be returned.
"""
unique_counts_by_col = feature_matrix.nunique(
dropna=not count_nan_as_value,
).to_dict()
keep = [
f_name
for f_name, unique_count in unique_counts_by_col.items()
if unique_count > 1
]
return _apply_feature_selection(keep, feature_matrix, features)
def remove_highly_correlated_features(
feature_matrix,
features=None,
pct_corr_threshold=0.95,
features_to_check=None,
features_to_keep=None,
):
"""Removes columns in feature matrix that are highly correlated with another column.
Note:
We make the assumption that, for a pair of features, the feature that is further
right in the feature matrix produced by ``dfs`` is the more complex one.
The assumption does not hold if the order of columns in the feature
matrix has changed from what ``dfs`` produces.
Args:
feature_matrix (:class:`pd.DataFrame`): DataFrame whose columns are feature
names and rows are instances. If Woodwork is not initialized, will
perform Woodwork initialization, which may result in slightly different
types than those in the original feature matrix created by Featuretools.
features (list[:class:`featuretools.FeatureBase`] or list[str], optional):
List of features to select.
pct_corr_threshold (float): The correlation threshold to be considered highly
correlated. Defaults to 0.95.
features_to_check (list[str], optional): List of column names to check
whether any pairs are highly correlated. Will not check any
other columns, meaning the only columns that can be removed
are in this list. If null, defaults to checking all columns.
features_to_keep (list[str], optional): List of column names to keep even
if correlated to another column. If null, all columns will be
candidates for removal.
Returns:
pd.DataFrame, list[:class:`.FeatureBase`]:
The feature matrix and the list of generated feature definitions.
Matches dfs output. If no feature list is provided as input,
the feature list will not be returned. For consistent results,
do not change the order of features outputted by dfs.
"""
if feature_matrix.ww.schema is None:
feature_matrix.ww.init()
if pct_corr_threshold < 0 or pct_corr_threshold > 1:
raise ValueError(
"pct_corr_threshold must be a float between 0 and 1, inclusive.",
)
if features_to_check is None:
features_to_check = list(feature_matrix.columns)
else:
for f_name in features_to_check:
assert (
f_name in feature_matrix.columns
), "feature named {} is not in feature matrix".format(f_name)
if features_to_keep is None:
features_to_keep = []
to_select = ["numeric", Boolean, BooleanNullable]
fm = feature_matrix.ww[features_to_check]
fm_to_check = fm.ww.select(include=to_select)
dropped = set()
columns_to_check = fm_to_check.columns
# When two features are found to be highly correlated,
# we drop the more complex feature
# Columns produced later in dfs are more complex
for i in range(len(columns_to_check) - 1, 0, -1):
more_complex_name = columns_to_check[i]
more_complex_col = fm_to_check[more_complex_name]
# Convert boolean or Int64 column to be float64
if pd.api.types.is_bool_dtype(more_complex_col) or isinstance(
more_complex_col.dtype,
pd.Int64Dtype,
):
more_complex_col = more_complex_col.astype("float64")
for j in range(i - 1, -1, -1):
less_complex_name = columns_to_check[j]
less_complex_col = fm_to_check[less_complex_name]
# Convert boolean or Int64 column to be float64
if pd.api.types.is_bool_dtype(less_complex_col) or isinstance(
less_complex_col.dtype,
pd.Int64Dtype,
):
less_complex_col = less_complex_col.astype("float64")
if abs(more_complex_col.corr(less_complex_col)) >= pct_corr_threshold:
dropped.add(more_complex_name)
break
keep = [
f_name
for f_name in feature_matrix.columns
if (f_name in features_to_keep or f_name not in dropped)
]
return _apply_feature_selection(keep, feature_matrix, features)
def _apply_feature_selection(keep, feature_matrix, features=None):
new_matrix = feature_matrix[keep]
new_feature_names = set(new_matrix.columns)
if features is not None:
new_features = []
for f in features:
if f.number_output_features > 1:
slices = [
f[i]
for i in range(f.number_output_features)
if f[i].get_name() in new_feature_names
]
if len(slices) == f.number_output_features:
new_features.append(f)
else:
new_features.extend(slices)
else:
if f.get_name() in new_feature_names:
new_features.append(f)
return new_matrix, new_features
return new_matrix
| 8,895 | 38.362832 | 113 | py |
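A small end-to-end sketch of the selection helpers above on a toy feature matrix (column names hypothetical):

```python
import numpy as np
import pandas as pd

from featuretools.selection import (
    remove_highly_null_features,
    remove_single_value_features,
)

fm = pd.DataFrame(
    {
        "half_null": [np.nan, np.nan, 1.0, 2.0],
        "constant": [1, 1, 1, 1],
        "signal": [1, 2, 3, 4],
    },
)
# 50% nulls is not strictly below the 0.5 threshold, so half_null is dropped.
print(remove_highly_null_features(fm, pct_null_threshold=0.5).columns.tolist())
# ['constant', 'signal']
# constant has a single unique value, so it is dropped.
print(remove_single_value_features(fm).columns.tolist())
# ['half_null', 'signal']
```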
| featuretools | featuretools-main/featuretools/selection/__init__.py |
# flake8: noqa
from featuretools.selection.api import *
| 56 | 18 | 40 | py |
| featuretools | featuretools-main/featuretools/feature_discovery/LiteFeature.py |
from __future__ import annotations
import hashlib
from dataclasses import field
from functools import total_ordering
from typing import Any, Dict, List, Optional, Set, Type, Union
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import LogicalType
from featuretools.feature_discovery.utils import (
get_primitive_return_type,
hash_primitive,
)
from featuretools.primitives.base.primitive_base import PrimitiveBase
@total_ordering
class LiteFeature:
_name: Optional[str] = None
_alias: Optional[str] = None
_logical_type: Optional[Type[LogicalType]] = None
_tags: Set[str] = field(default_factory=set)
_primitive: Optional[PrimitiveBase] = None
_base_features: List[LiteFeature] = field(default_factory=list)
_df_id: Optional[str] = None
_id: str
_n_output_features: int = 1
_depth = 0
_related_features: Set[LiteFeature]
_idx: int = 0
def __init__(
self,
name: Optional[str] = None,
logical_type: Optional[Type[LogicalType]] = None,
tags: Optional[Set[str]] = None,
primitive: Optional[PrimitiveBase] = None,
base_features: Optional[List[LiteFeature]] = None,
df_id: Optional[str] = None,
related_features: Optional[Set[LiteFeature]] = None,
idx: Optional[int] = None,
):
self._logical_type = logical_type
self._tags = tags if tags else set()
self._primitive = primitive
self._base_features = base_features if base_features else []
self._df_id = df_id
self._idx = idx if idx is not None else 0
self._related_features = related_features if related_features else set()
if self._primitive:
if not isinstance(self._primitive, PrimitiveBase):
raise ValueError("primitive input must be of type PrimitiveBase")
if len(self.base_features) == 0:
raise ValueError("there must be base features if given a primitive")
if self._primitive.commutative:
self._base_features = sorted(self._base_features)
self._n_output_features = self._primitive.number_output_features
self._depth = max([x.depth for x in self.base_features]) + 1
if name:
self._alias = name
self._name = self._primitive.generate_name(
[x.name for x in self.base_features],
)
return_column_schema = get_primitive_return_type(self._primitive)
self._logical_type = (
type(return_column_schema.logical_type)
if return_column_schema.logical_type
else None
)
self._tags = return_column_schema.semantic_tags
else:
if name is None:
raise TypeError("Name must be given if origin feature")
if self._logical_type is None:
raise TypeError("Logical Type must be given if origin feature")
self._name = name
if self._logical_type is not None and "index" not in self._tags:
self._tags = self._tags | self._logical_type.standard_tags
self._id = self._generate_hash()
@property
def name(self):
if self._alias:
return self._alias
elif self.is_multioutput():
return f"{self._name}[{self.idx}]"
return self._name
@name.setter
def name(self, _):
raise AttributeError("name is immutable")
def set_alias(self, value: Union[str, None]):
self._alias = value
@property
def non_indexed_name(self):
if not self.is_multioutput():
raise ValueError("only used on multioutput features")
return self._name
@property
def logical_type(self):
return self._logical_type
@logical_type.setter
def logical_type(self, _):
raise AttributeError("logical_type is immutable")
@property
def tags(self):
return self._tags.copy()
@tags.setter
def tags(self, _):
raise AttributeError("tags is immutable")
@property
def primitive(self):
return self._primitive
@primitive.setter
def primitive(self, _):
raise AttributeError("primitive is immutable")
@property
def base_features(self):
return self._base_features
@base_features.setter
def base_features(self, _):
raise AttributeError("base_features are immutable")
@property
def df_id(self):
return self._df_id
@df_id.setter
def df_id(self, _):
raise AttributeError("df_id is immutable")
@property
def id(self):
return self._id
@id.setter
def id(self, _):
raise AttributeError("id is immutable")
@property
def n_output_features(self):
return self._n_output_features
@n_output_features.setter
def n_output_features(self, _):
raise AttributeError("n_output_features is immutable")
@property
def depth(self):
return self._depth
@depth.setter
def depth(self, _):
raise AttributeError("depth is immutable")
@property
def related_features(self):
return self._related_features.copy()
@related_features.setter
def related_features(self, value: Set[LiteFeature]):
self._related_features = value
@property
def idx(self):
return self._idx
@idx.setter
def idx(self, _):
raise AttributeError("idx is immutable")
@staticmethod
def hash(
name: Optional[str],
primitive: Optional[PrimitiveBase] = None,
base_features: List[LiteFeature] = [],
df_id: Optional[str] = None,
idx: int = 0,
):
hash_msg = hashlib.sha256()
if primitive:
# TODO: hashing should be on primitive
hash_msg.update(hash_primitive(primitive)[0].encode("utf-8"))
commutative = primitive.commutative
assert (
len(base_features) > 0
), "there must be base features if given a primitive"
# sort a copy for commutative primitives instead of mutating the input list
base_columns = sorted(base_features) if commutative else base_features
for c in base_columns:
hash_msg.update(c.id.encode("utf-8"))
else:
assert name
hash_msg.update(name.encode("utf-8"))
if df_id:
hash_msg.update(df_id.encode("utf-8"))
hash_msg.update(str(idx).encode("utf-8"))
return hash_msg.hexdigest()
def __eq__(self, other: LiteFeature):
return self._id == other._id
def __lt__(self, other: LiteFeature):
return self._id < other._id
def __ne__(self, other):
return self._id != other._id
def __hash__(self):
return hash(self._id)
def _generate_hash(self) -> str:
return self.hash(
name=self._name,
primitive=self._primitive,
base_features=self._base_features,
df_id=self._df_id,
idx=self._idx,
)
def get_primitive_name(self) -> Union[str, None]:
return self._primitive.name if self._primitive else None
def get_dependencies(self, deep=False) -> List[LiteFeature]:
flattened_dependencies = []
for f in self._base_features:
flattened_dependencies.append(f)
if deep:
dependencies = f.get_dependencies()
if isinstance(dependencies, list):
flattened_dependencies.extend(dependencies)
else:
flattened_dependencies.append(dependencies)
return flattened_dependencies
def get_origin_features(self) -> List[LiteFeature]:
all_dependencies = self.get_dependencies(deep=True)
return [f for f in all_dependencies if f._depth == 0]
@property
def column_schema(self) -> ColumnSchema:
return ColumnSchema(logical_type=self.logical_type, semantic_tags=self.tags)
def dependent_primitives(self) -> Set[Type[PrimitiveBase]]:
dependent_features = self.get_dependencies(deep=True)
dependent_primitives = {
type(f._primitive) for f in dependent_features if f._primitive
}
if self._primitive:
dependent_primitives.add(type(self._primitive))
return dependent_primitives
def to_dict(self) -> Dict[str, Any]:
return {
"name": self.name,
"logical_type": self.logical_type.__name__ if self.logical_type else None,
"tags": list(self.tags),
"primitive": hash_primitive(self.primitive)[0] if self.primitive else None,
"base_features": [x.id for x in self.base_features],
"df_id": self.df_id,
"id": self.id,
"related_features": [x.id for x in self.related_features],
"idx": self.idx,
}
def is_multioutput(self) -> bool:
return len(self._related_features) > 0
def copy(self) -> LiteFeature:
copied_feature = LiteFeature(
name=self._name,
logical_type=self._logical_type,
tags=self._tags.copy(),
primitive=self._primitive,
base_features=[f.copy() for f in self._base_features],
df_id=self._df_id,
idx=self._idx,
related_features=self._related_features.copy(),
)
copied_feature.set_alias(self._alias)
return copied_feature
def __repr__(self) -> str:
name = f"name='{self.name}'"
logical_type = f"logical_type={self.logical_type}"
tags = f"tags={self.tags}"
primitive = f"primitive={self.get_primitive_name()}"
return f"LiteFeature({name}, {logical_type}, {tags}, {primitive})"
| 9,826
| 29.236923
| 87
|
py
|
featuretools
|
featuretools-main/featuretools/feature_discovery/feature_discovery.py
|
import inspect
from collections import defaultdict
from itertools import combinations, permutations, product
from typing import Iterable, List, Set, Tuple, Type, Union, cast
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import LogicalType
from woodwork.table_schema import TableSchema
from featuretools.feature_discovery.FeatureCollection import FeatureCollection
from featuretools.feature_discovery.LiteFeature import LiteFeature
from featuretools.feature_discovery.utils import column_schema_to_keys
from featuretools.primitives.base.primitive_base import PrimitiveBase
from featuretools.tests.testing_utils.generate_fake_dataframe import flatten_list
def _index_column_set(column_set: List[ColumnSchema]) -> List[Tuple[str, int]]:
"""
Indexes input set to find types of columns and the quantity of each
Args:
column_set (List(ColumnSchema)):
List of Column types needed by associated primitive.
Returns:
List[Tuple[str, int]]
A list of key, count tuples
Examples:
.. code-block:: python
from featuretools.feature_discovery.feature_discovery import _index_column_set
from woodwork.column_schema import ColumnSchema
column_set = [ColumnSchema(semantic_tags={"numeric"}), ColumnSchema(semantic_tags={"numeric"})]
indexed_column_set = _index_column_set(column_set)
[("numeric": 2)]
"""
out = defaultdict(int)
for column_schema in column_set:
key = column_schema_to_keys(column_schema)
out[key] += 1
return list(out.items())
def _get_features(
feature_collection: FeatureCollection,
column_keys: Tuple[Tuple[str, int]],
commutative: bool,
) -> List[List[LiteFeature]]:
"""
Calculates all LiteFeature combinations using the given collection of existing features and the input set of required columns.
Args:
feature_collection (FeatureCollection):
An indexed feature collection object for efficient querying of features
column_keys (List[Tuple[str, int]]):
List of Column types needed by associated primitive.
commutative (bool):
whether to draw feature tuples with combinations (commutative primitives) or permutations (non-commutative primitives).
Returns:
List[List[LiteFeature]]
A list of LiteFeature sets.
Examples:
.. code-block:: python
from featuretools.feature_discovery.feature_discovery import _get_features
from woodwork.column_schema import ColumnSchema
feature_groups = {
"ANY": ["f1", "f2", "f3"],
"Double": ["f1", "f2", "f3"],
"numeric": ["f1", "f2", "f3"],
"Double,numeric": ["f1", "f2", "f3"],
}
column_set = [ColumnSchema(semantic_tags={"numeric"}), ColumnSchema(semantic_tags={"numeric"})]
features = _get_features(feature_groups, column_set, commutative=False)
"""
prod_iter = []
for key, count in column_keys:
relevant_features = list(feature_collection.get_by_key(key))
if commutative:
prod_iter.append(combinations(relevant_features, count))
else:
prod_iter.append(permutations(relevant_features, count))
feature_combinations = product(*prod_iter)
return [flatten_list(x) for x in feature_combinations]
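# Illustration of the branch above: for a key with count=2 over features
# [f1, f2, f3], a commutative primitive draws the 3 unordered pairs
# (f1, f2), (f1, f3), (f2, f3); a non-commutative primitive draws all 6
# ordered pairs. product(*prod_iter) then crosses these draws across keys.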
def _primitive_to_columnsets(primitive: PrimitiveBase) -> List[List[ColumnSchema]]:
column_sets = primitive.input_types
assert column_sets is not None
if not isinstance(column_sets[0], list):
column_sets = [primitive.input_types]
column_sets = cast(List[List[ColumnSchema]], column_sets)
# Some primitives are commutative yet explicitly list both orderings of a
# commutative pair in input_types (e.g. MultiplyNumericBoolean), which would
# create duplicate features, so the duplicates are removed here.
if primitive.commutative:
existing = set()
uniq_column_sets = []
for column_set in column_sets:
key = "_".join(sorted([x.__repr__() for x in column_set]))
if key not in existing:
uniq_column_sets.append(column_set)
existing.add(key)
column_sets = uniq_column_sets
return column_sets
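# Sketch of the dedup above: a commutative primitive listing both
# [Numeric, Boolean] and [Boolean, Numeric] as input sets keeps only one of
# them, since both sort to the same repr-based key.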
def _get_matching_features(
feature_collection: FeatureCollection,
primitive: PrimitiveBase,
) -> List[List[LiteFeature]]:
"""
For a given primitive, find all feature sets that can be used to create new features
Args:
feature_collection (FeatureCollection):
An indexed feature collection object for efficient querying of features
primitive (PrimitiveBase)
Returns:
List[List[LiteFeature]]
List of feature sets
Examples:
.. code-block:: python
from featuretools.feature_discovery.feature_discovery import _get_matching_features
from woodwork.column_schema import ColumnSchema
feature_groups = {
"ANY": ["f1", "f2", "f3"],
"Double": ["f1", "f2", "f3"],
"numeric": ["f1", "f2", "f3"],
"Double,numeric": ["f1", "f2", "f3"],
}
feature_sets = _get_matching_features(feature_groups, AddNumeric)
[
["f1", "f2"],
["f1", "f3"],
["f2", "f3"]
]
"""
column_sets = _primitive_to_columnsets(primitive=primitive)
column_keys_set = [_index_column_set(c) for c in column_sets]
commutative = primitive.commutative
feature_sets = []
for column_keys in column_keys_set:
assert column_keys is not None
feature_sets_ = _get_features(
feature_collection=feature_collection,
column_keys=tuple(column_keys),
commutative=commutative,
)
feature_sets.extend(feature_sets_)
return feature_sets
def _features_from_primitive(
primitive: PrimitiveBase,
feature_collection: FeatureCollection,
) -> List[LiteFeature]:
"""
For a given primitive, creates all engineered features
Args:
primitive (Type[PrimitiveBase])
feature_collection (FeatureCollection):
An indexed feature collection object for efficient querying of features
Returns:
List[List[LiteFeature]]
List of feature sets
Examples:
.. code-block:: python
from featuretools.feature_discovery.feature_discovery import _features_from_primitive
from woodwork.column_schema import ColumnSchema
feature_groups = {
"ANY": ["f1", "f2", "f3"],
"Double": ["f1", "f2", "f3"],
"numeric": ["f1", "f2", "f3"],
"Double,numeric": ["f1", "f2", "f3"],
}
feature_sets = _features_from_primitive(AddNumeric, feature_groups)
[
["f1", "f2"],
["f1", "f3"],
["f2", "f3"]
]
"""
assert isinstance(primitive, PrimitiveBase)
features: List[LiteFeature] = []
feature_sets = _get_matching_features(
feature_collection=feature_collection,
primitive=primitive,
)
for feature_set in feature_sets:
if primitive.number_output_features > 1:
related_features: Set[LiteFeature] = set()
for n in range(primitive.number_output_features):
feature = LiteFeature(
primitive=primitive,
base_features=feature_set,
idx=n,
)
related_features.add(feature)
for f in related_features:
f.related_features = related_features - {f}
features.append(f)
else:
features.append(
LiteFeature(
primitive=primitive,
base_features=feature_set,
),
)
return features
def schema_to_features(schema: TableSchema) -> List[LiteFeature]:
"""
** EXPERIMENTAL **
Convert a Woodwork Schema object to a list of LiteFeatures.
Args:
schema (TableSchema):
Woodwork TableSchema object
Returns:
List[LiteFeature]
Examples:
.. code-block:: python
from featuretools.feature_discovery.feature_discovery import schema_to_features
import pandas as pd
import woodwork as ww
df = pd.DataFrame({
"idx": [0,1,2,3],
"f1": ["A", "B", "C", "D"],
"f2": [1.2, 2.3, 3.4, 4.5]
})
df.ww.init()
features = schema_to_features(df.ww.schema)
"""
features = []
for col_name, column_schema in schema.columns.items():
assert isinstance(column_schema, ColumnSchema)
logical_type = column_schema.logical_type
assert logical_type
assert issubclass(type(logical_type), LogicalType)
tags = column_schema.semantic_tags
assert isinstance(tags, set)
features.append(
LiteFeature(
name=col_name,
logical_type=type(logical_type),
tags=tags,
),
)
return features
def _check_inputs(
input_features: Iterable[LiteFeature],
primitives: Union[List[Type[PrimitiveBase]], List[PrimitiveBase]],
) -> Tuple[Iterable[LiteFeature], List[PrimitiveBase]]:
if not isinstance(input_features, Iterable):
raise ValueError("input_features must be an iterable of LiteFeature objects")
for feature in input_features:
if not isinstance(feature, LiteFeature):
raise ValueError(
"input_features must be an iterable of LiteFeature objects",
)
if not isinstance(primitives, List):
raise ValueError(
"primitives must be a list of Primitive classes or Primitive instances",
)
primitive_instances: List[PrimitiveBase] = []
for primitive in primitives:
if inspect.isclass(primitive) and issubclass(primitive, PrimitiveBase):
primitive_instances.append(primitive())
elif isinstance(primitive, PrimitiveBase):
primitive_instances.append(primitive)
else:
raise ValueError(
"primitives must be a list of Primitive classes or Primitive instances",
)
return (input_features, primitive_instances)
def generate_features_from_primitives(
input_features: Iterable[LiteFeature],
primitives: Union[List[Type[PrimitiveBase]], List[PrimitiveBase]],
) -> List[LiteFeature]:
"""
** EXPERIMENTAL **
Calculates all features for a given set of input features and a list of primitives.
Args:
input_features (Iterable[LiteFeature]):
Iterable of input features
primitives (List[Type[PrimitiveBase]] or List[PrimitiveBase]):
List of primitive classes or primitive instances
Returns:
List[LiteFeature]
Examples:
.. code-block:: python
from featuretools.feature_discovery.feature_discovery import generate_features_from_primitives, schema_to_features
from featuretools.primitives import Absolute, IsNull
import pandas as pd
import woodwork as ww
df = pd.DataFrame({
"idx": [0,1,2,3],
"f1": ["A", "B", "C", "D"],
"f2": [1.2, 2.3, 3.4, 4.5]
})
df.ww.init()
origin_features = schema_to_features(df.ww.schema)
features = generate_features_from_primitives(origin_features, [Absolute, IsNull])
"""
(input_features, primitives) = _check_inputs(input_features, primitives)
features = [x.copy() for x in input_features]
feature_collection = FeatureCollection(features=features)
feature_collection.reindex()
for primitive in primitives:
features_ = _features_from_primitive(
primitive=primitive,
feature_collection=feature_collection,
)
features.extend(features_)
return features
| 12,161
| 30.754569
| 128
|
py
|
featuretools
|
featuretools-main/featuretools/feature_discovery/FeatureCollection.py
|
from __future__ import annotations
import hashlib
from itertools import combinations
from typing import Any, Dict, List, Optional, Set, Type, Union, cast
from woodwork.logical_types import LogicalType
from featuretools.feature_discovery.LiteFeature import LiteFeature
from featuretools.feature_discovery.type_defs import ANY
from featuretools.feature_discovery.utils import hash_primitive, logical_types_map
from featuretools.primitives.base.primitive_base import PrimitiveBase
from featuretools.primitives.utils import (
PrimitivesDeserializer,
)
class FeatureCollection:
def __init__(self, features: List[LiteFeature]):
self._all_features: List[LiteFeature] = features
self.indexed = False
self.sorted = False
self._hash_key: Optional[str] = None
def sort_features(self):
if not self.sorted:
self._all_features = sorted(self._all_features)
self.sorted = True
def __repr__(self):
return f"<FeatureCollection ({self.hash_key[:5]}) n_features={len(self._all_features)} indexed={self.indexed}>"
@property
def all_features(self):
return self._all_features.copy()
@property
def hash_key(self) -> str:
if self._hash_key is None:
if not self.sorted:
self.sort_features()
self._set_hash()
assert self._hash_key is not None
return self._hash_key
def _set_hash(self):
hash_msg = hashlib.sha256()
for feature in self._all_features:
hash_msg.update(feature.id.encode("utf-8"))
self._hash_key = hash_msg.hexdigest()
return self
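# Because the hash is computed over sorted feature ids, two collections that
# contain the same features in any insertion order share a hash_key and
# therefore compare equal.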
def __hash__(self):
return hash(self.hash_key)
def __eq__(self, other: FeatureCollection) -> bool:
return self.hash_key == other.hash_key
def reindex(self) -> FeatureCollection:
self.by_logical_type: Dict[
Union[Type[LogicalType], None],
Set[LiteFeature],
] = {}
self.by_tag: Dict[str, Set[LiteFeature]] = {}
self.by_origin_feature: Dict[LiteFeature, Set[LiteFeature]] = {}
self.by_depth: Dict[int, Set[LiteFeature]] = {}
self.by_name: Dict[str, LiteFeature] = {}
self.by_key: Dict[str, List[LiteFeature]] = {}
for feature in self._all_features:
for key in self.feature_to_keys(feature):
self.by_key.setdefault(key, []).append(feature)
logical_type = feature.logical_type
self.by_logical_type.setdefault(logical_type, set()).add(feature)
tags = feature.tags
for tag in tags:
self.by_tag.setdefault(tag, set()).add(feature)
origin_features = feature.get_origin_features()
for origin_feature in origin_features:
self.by_origin_feature.setdefault(origin_feature, set()).add(feature)
if feature.depth == 0:
self.by_origin_feature.setdefault(feature, set()).add(feature)
feature_name = feature.name
assert feature_name is not None
assert feature_name not in self.by_name
self.by_name[feature_name] = feature
self.indexed = True
return self
def get_by_logical_type(self, logical_type: Type[LogicalType]) -> Set[LiteFeature]:
return self.by_logical_type.get(logical_type, set())
def get_by_tag(self, tag: str) -> Set[LiteFeature]:
return self.by_tag.get(tag, set())
def get_by_origin_feature(self, origin_feature: LiteFeature) -> Set[LiteFeature]:
return self.by_origin_feature.get(origin_feature, set())
def get_by_origin_feature_name(self, name: str) -> Union[LiteFeature, None]:
feature = self.by_name.get(name)
return feature
def get_dependencies_by_origin_name(self, name) -> Set[LiteFeature]:
origin_feature = self.by_name.get(name)
if origin_feature:
return self.by_origin_feature[origin_feature]
return set()
def get_by_key(self, key: str) -> List[LiteFeature]:
return self.by_key.get(key, [])
def flatten_features(self) -> Dict[str, LiteFeature]:
all_features_dict: Dict[str, LiteFeature] = {}
def rfunc(feature_list: List[LiteFeature]):
for feature in feature_list:
all_features_dict.setdefault(feature.id, feature)
rfunc(feature.base_features)
rfunc(self._all_features)
return all_features_dict
def flatten_primitives(self) -> Dict[str, Dict[str, Any]]:
all_primitives_dict: Dict[str, Dict[str, Any]] = {}
def rfunc(feature_list: List[LiteFeature]):
for feature in feature_list:
if feature.primitive:
key, prim_dict = hash_primitive(feature.primitive)
all_primitives_dict.setdefault(key, prim_dict)
rfunc(feature.base_features)
rfunc(self._all_features)
return all_primitives_dict
def to_dict(self):
all_primitives_dict = self.flatten_primitives()
all_features_dict = self.flatten_features()
return {
"primitives": all_primitives_dict,
"feature_ids": [f.id for f in self._all_features],
"all_features": {k: f.to_dict() for k, f in all_features_dict.items()},
}
@staticmethod
def feature_to_keys(feature: LiteFeature) -> List[str]:
"""
Generate hashing keys from LiteFeature. For example:
- LiteFeature("f1", Double, {"numeric"}) -> ['Double', 'numeric', 'Double,numeric', 'ANY']
- LiteFeature("f1", Datetime, {"time_index"}) -> ['Datetime', 'time_index', 'Datetime,time_index', 'ANY']
- LiteFeature("f1", Double, {"index", "other"}) -> ['Double', 'index', 'other', 'Double,index', 'Double,other', 'ANY']
Args:
feature (LiteFeature):
Returns:
List[str]
List of hashing keys
"""
keys: List[str] = []
logical_type = feature.logical_type
logical_type_name = None
if logical_type is not None:
logical_type_name = logical_type.__name__
keys.append(logical_type_name)
all_tags = sorted(feature.tags)
tag_combinations = []
# generate combinations of all lengths from 1 to the length of the input list
for i in range(1, len(all_tags) + 1):
# generate combinations of length i and append to the combinations_list
for comb in combinations(all_tags, i):
tag_combinations.append(list(comb))
for tag_combination in tag_combinations:
tags_key = ",".join(tag_combination)
keys.append(tags_key)
if logical_type_name:
keys.append(f"{logical_type_name},{tags_key}")
keys.append(ANY)
return keys
@staticmethod
def from_dict(input_dict):
primitive_deserializer = PrimitivesDeserializer()
primitives = {}
for prim_key, prim_dict in input_dict["primitives"].items():
primitive = primitive_deserializer.deserialize_primitive(
prim_dict,
)
assert isinstance(primitive, PrimitiveBase)
primitives[prim_key] = primitive
hydrated_features: Dict[str, LiteFeature] = {}
feature_ids: List[str] = cast(List[str], input_dict["feature_ids"])
all_features: Dict[str, Any] = cast(Dict[str, Any], input_dict["all_features"])
def hydrate_feature(feature_id: str) -> LiteFeature:
if feature_id in hydrated_features:
return hydrated_features[feature_id]
feature_dict = all_features[feature_id]
base_features = [hydrate_feature(x) for x in feature_dict["base_features"]]
logical_type = (
logical_types_map[feature_dict["logical_type"]]
if feature_dict["logical_type"]
else None
)
hydrated_feature = LiteFeature(
name=feature_dict["name"],
logical_type=logical_type,
tags=set(feature_dict["tags"]),
primitive=primitives[feature_dict["primitive"]]
if feature_dict["primitive"]
else None,
base_features=base_features,
df_id=feature_dict["df_id"],
related_features=set(),
idx=feature_dict["idx"],
)
assert hydrated_feature.id == feature_dict["id"] == feature_id
hydrated_features[feature_id] = hydrated_feature
# related features must be linked after the feature is stored in the cache
related_features = [
hydrate_feature(x) for x in feature_dict["related_features"]
]
hydrated_feature.related_features = set(related_features)
return hydrated_feature
return FeatureCollection([hydrate_feature(x) for x in feature_ids])
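# Round-trip sketch (assuming fc is an existing FeatureCollection): ids are
# content-addressed and equality compares hash_key, so
#
#   restored = FeatureCollection.from_dict(fc.to_dict())
#   assert restored == fc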
| 9,039
| 34.873016
| 126
|
py
|
featuretools
|
featuretools-main/featuretools/feature_discovery/convertors.py
|
from __future__ import annotations
from typing import Dict, List
import pandas as pd
from woodwork.logical_types import LogicalType
from featuretools.feature_base.feature_base import (
FeatureBase,
IdentityFeature,
TransformFeature,
)
from featuretools.feature_discovery.LiteFeature import LiteFeature
from featuretools.primitives import TransformPrimitive
from featuretools.primitives.base.primitive_base import PrimitiveBase
FeatureCache = Dict[str, FeatureBase]
def convert_featurebase_list_to_feature_list(
featurebase_list: List[FeatureBase],
) -> List[LiteFeature]:
"""
Convert a list of FeatureBase objects to a list of LiteFeature objects
Args:
featurebase_list (List[FeatureBase]):
Returns:
LiteFeatures (List[LiteFeature]) - converted LiteFeature objects
"""
def rfunc(fb: FeatureBase) -> List[LiteFeature]:
base_features = [
feature
for feature_list in [rfunc(x) for x in fb.base_features]
for feature in feature_list
]
col_schema = fb.column_schema
logical_type = col_schema.logical_type
if logical_type is not None:
assert issubclass(type(logical_type), LogicalType)
logical_type = type(logical_type)
tags = col_schema.semantic_tags
if isinstance(fb, IdentityFeature):
primitive = None
else:
primitive = fb.primitive
assert isinstance(primitive, PrimitiveBase)
if fb.number_output_features > 1:
features: List[LiteFeature] = []
for idx, name in enumerate(fb.get_feature_names()):
f = LiteFeature(
name=name,
logical_type=logical_type,
tags=tags,
primitive=primitive,
base_features=base_features,
# TODO: use when working with multi-table
df_id=None,
idx=idx,
)
features.append(f)
for feature in features:
related_features = [f for f in features if f.id != feature.id]
feature.related_features = set(related_features)
return features
return [
LiteFeature(
name=fb.get_name(),
logical_type=logical_type,
tags=tags,
primitive=primitive,
base_features=base_features,
# TODO: use when working with multi-table
df_id=None,
),
]
return [
feature
for feature_list in [rfunc(fb) for fb in featurebase_list]
for feature in feature_list
]
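# Minimal usage sketch (assuming feature_defs is a list of FeatureBase
# objects, e.g. the features_only=True output of ft.dfs):
#
#   lite_features = convert_featurebase_list_to_feature_list(feature_defs)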
def _feature_to_transform_feature(
feature: LiteFeature,
base_features: List[FeatureBase],
) -> FeatureBase:
"""
Transform a LiteFeature into a FeatureBase object, handling multi-output
features correctly.
Args:
feature (LiteFeature)
base_features (List[FeatureBase])
Returns:
FeatureBase
"""
assert feature.primitive
assert isinstance(
feature.primitive,
TransformPrimitive,
), "Only Transform Primitives"
fb = TransformFeature(base_features, feature.primitive)
if feature.is_multioutput():
sorted_features = sorted(
[f for f in feature.related_features] + [feature],
key=lambda x: x.idx,
)
names = [x.name for x in sorted_features]
fb = fb.rename(feature.non_indexed_name)
fb.set_feature_names(names)
else:
fb = fb.rename(feature.name)
return fb
def _convert_feature_to_featurebase(
feature: LiteFeature,
dataframe: pd.DataFrame,
cache: FeatureCache,
) -> FeatureBase:
"""
Recursively transforms a LiteFeature object into a FeatureBase object
Args:
feature (LiteFeature)
dataframe (pd.DataFrame)
cache (FeatureCache): already converted features
Returns:
FeatureBase
"""
def get_base_features(
feature: LiteFeature,
) -> List[FeatureBase]:
new_base_features: List[FeatureBase] = []
for bf in feature.base_features:
fb = rfunc(bf)
if bf.is_multioutput():
idx = bf.idx
# if it's multi-output, index into the FeatureBase to select the right column
new_base_features.append(fb[idx])
else:
new_base_features.append(fb)
return new_base_features
def rfunc(feature: LiteFeature) -> FeatureBase:
# if feature has already been converted, return from cache
if feature.id in cache:
return cache[feature.id]
# if depth is 0, we are at an origin feature
if feature.depth == 0:
fb = IdentityFeature(dataframe.ww[feature.name])
cache[feature.id] = fb
return fb
base_features = get_base_features(feature)
fb = _feature_to_transform_feature(feature, base_features)
cache[feature.id] = fb
return fb
return rfunc(feature)
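# The cache keyed by feature.id ensures each LiteFeature is converted at most
# once, so sub-features shared across the tree reuse the same FeatureBase.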
def convert_feature_list_to_featurebase_list(
feature_list: List[LiteFeature],
dataframe: pd.DataFrame,
) -> List[FeatureBase]:
"""
Convert a list of LiteFeature objects into a list of FeatureBase objects
Args:
feature_list (List[LiteFeature])
dataframe (pd.DataFrame)
Returns:
List[FeatureBase]
"""
feature_cache: FeatureCache = {}
converted_features: List[FeatureBase] = []
for feature in feature_list:
if feature.is_multioutput():
related_feature_ids = [f.id for f in feature.related_features]
if any((x in feature_cache for x in related_feature_ids)):
# feature base already created for related ids
continue
fb = _convert_feature_to_featurebase(
feature=feature,
dataframe=dataframe,
cache=feature_cache,
)
converted_features.append(fb)
return converted_features
| 6,120
| 27.207373
| 78
|
py
|
featuretools
|
featuretools-main/featuretools/feature_discovery/utils.py
|
import hashlib
import json
from functools import lru_cache
from typing import Any, Dict, Tuple
from woodwork.column_schema import ColumnSchema
from featuretools.feature_discovery.type_defs import ANY
from featuretools.primitives.base.primitive_base import PrimitiveBase
from featuretools.primitives.utils import (
get_all_logical_type_names,
get_all_primitives,
serialize_primitive,
)
primitives_map = get_all_primitives()
logical_types_map = get_all_logical_type_names()
def column_schema_to_keys(column_schema: ColumnSchema) -> str:
"""
Generate a hashing key from a ColumnSchema. For example:
- ColumnSchema(logical_type=Double) -> "Double"
- ColumnSchema(semantic_tags={"index"}) -> "index"
- ColumnSchema(logical_type=Double, semantic_tags={"index", "other"}) -> "Double,index,other"
Args:
column_schema (ColumnSchema):
Returns:
str: hashing key
"""
logical_type = column_schema.logical_type
tags = column_schema.semantic_tags
lt_key = None
if logical_type:
lt_key = type(logical_type).__name__
tags = sorted(tags)
if len(tags) > 0:
tag_key = ",".join(tags)
return f"{lt_key},{tag_key}" if lt_key is not None else tag_key
elif lt_key is not None:
return lt_key
else:
return ANY
@lru_cache(maxsize=None)
def hash_primitive(primitive: PrimitiveBase) -> Tuple[str, Dict[str, Any]]:
hash_msg = hashlib.sha256()
primitive_name = primitive.name
assert isinstance(primitive_name, str)
primitive_dict = serialize_primitive(primitive)
primitive_json = json.dumps(primitive_dict).encode("utf-8")
hash_msg.update(primitive_json)
key = hash_msg.hexdigest()
return (key, primitive_dict)
def get_primitive_return_type(primitive: PrimitiveBase) -> ColumnSchema:
"""
Get Return type from a primitive
Args:
primitive (PrimitiveBase)
Returns:
ColumnSchema
"""
if primitive.return_type:
return primitive.return_type
return_type = primitive.input_types[0]
if isinstance(return_type, list):
return_type = return_type[0]
return return_type
| 2,172
| 26.858974
| 97
|
py
|
featuretools
|
featuretools-main/featuretools/feature_discovery/type_defs.py
|
ANY = "ANY"
| 12
| 5.5
| 11
|
py
|
featuretools
|
featuretools-main/docs/notebook_version_standardizer.py
|
import json
import os
import click
DOCS_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "source")
def _get_ipython_notebooks(docs_source):
directories_to_skip = ["_templates", "generated", ".ipynb_checkpoints"]
notebooks = []
for root, _, filenames in os.walk(docs_source):
if any(dir_ in root for dir_ in directories_to_skip):
continue
for filename in filenames:
if filename.endswith(".ipynb"):
notebooks.append(os.path.join(root, filename))
return notebooks
def _check_delete_empty_cell(notebook, delete=True):
with open(notebook, "r") as f:
source = json.load(f)
cell = source["cells"][-1]
if cell["cell_type"] == "code" and cell["source"] == []:
# this is an empty cell, which we should delete
if delete:
source["cells"] = source["cells"][:-1]
else:
return False
if delete:
with open(notebook, "w") as f:
json.dump(source, f, ensure_ascii=False, indent=1)
else:
return True
def _check_execution_and_output(notebook):
with open(notebook, "r") as f:
source = json.load(f)
for cells in source["cells"]:
if cells["cell_type"] == "code" and (
cells["execution_count"] is not None or cells["outputs"] != []
):
return False
return True
def _check_python_version(notebook, default_version):
with open(notebook, "r") as f:
source = json.load(f)
if source["metadata"]["language_info"]["version"] != default_version:
return False
return True
def _fix_python_version(notebook, default_version):
with open(notebook, "r") as f:
source = json.load(f)
source["metadata"]["language_info"]["version"] = default_version
with open(notebook, "w") as f:
json.dump(source, f, ensure_ascii=False, indent=1)
def _fix_execution_and_output(notebook):
with open(notebook, "r") as f:
source = json.load(f)
for cells in source["cells"]:
if cells["cell_type"] == "code" and cells["execution_count"] is not None:
cells["execution_count"] = None
cells["outputs"] = []
source["metadata"]["kernelspec"]["display_name"] = "Python 3"
source["metadata"]["kernelspec"]["name"] = "python3"
with open(notebook, "w") as f:
json.dump(source, f, ensure_ascii=False, indent=1)
def _get_notebooks_with_executions_and_empty(notebooks, default_version="3.8.2"):
executed = []
empty_last_cell = []
versions = []
for notebook in notebooks:
if not _check_execution_and_output(notebook):
executed.append(notebook)
if not _check_delete_empty_cell(notebook, delete=False):
empty_last_cell.append(notebook)
if not _check_python_version(notebook, default_version):
versions.append(notebook)
return (executed, empty_last_cell, versions)
def _fix_versions(notebooks, default_version="3.8.2"):
for notebook in notebooks:
_fix_python_version(notebook, default_version)
def _remove_notebook_empty_last_cell(notebooks):
for notebook in notebooks:
_check_delete_empty_cell(notebook, delete=True)
def _standardize_outputs(notebooks):
for notebook in notebooks:
_fix_execution_and_output(notebook)
@click.group()
def cli():
"""no-op"""
@cli.command()
def standardize():
notebooks = _get_ipython_notebooks(DOCS_PATH)
(
executed_notebooks,
empty_cells,
versions,
) = _get_notebooks_with_executions_and_empty(notebooks)
if executed_notebooks:
_standardize_outputs(executed_notebooks)
executed_notebooks = ["\t" + notebook for notebook in executed_notebooks]
executed_notebooks = "\n".join(executed_notebooks)
click.echo(f"Removed the outputs for:\n {executed_notebooks}")
if empty_cells:
_remove_notebook_empty_last_cell(empty_cells)
empty_cells = ["\t" + notebook for notebook in empty_cells]
empty_cells = "\n".join(empty_cells)
click.echo(f"Removed the empty cells for:\n {empty_cells}")
if versions:
_fix_versions(versions)
versions = ["\t" + notebook for notebook in versions]
versions = "\n".join(versions)
click.echo(f"Fixed python versions for:\n {versions}")
@cli.command()
def check_execution():
notebooks = _get_ipython_notebooks(DOCS_PATH)
(
executed_notebooks,
empty_cells,
versions,
) = _get_notebooks_with_executions_and_empty(notebooks)
if executed_notebooks:
executed_notebooks = ["\t" + notebook for notebook in executed_notebooks]
executed_notebooks = "\n".join(executed_notebooks)
raise SystemExit(
f"The following notebooks have executed outputs:\n {executed_notebooks}\n"
"Please run make lint-fix to fix this.",
)
if empty_cells:
empty_cells = ["\t" + notebook for notebook in empty_cells]
empty_cells = "\n".join(empty_cells)
raise SystemExit(
f"The following notebooks have empty cells at the end:\n {empty_cells}\n"
"Please run make lint-fix to fix this.",
)
if versions:
versions = ["\t" + notebook for notebook in versions]
versions = "\n".join(versions)
raise SystemExit(
f"The following notebooks have the wrong Python version: \n {versions}\n"
"Please run make lint-fix to fix this.",
)
if __name__ == "__main__":
cli()
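# Usage sketch (run from the docs/ directory; click maps underscores in
# command names to dashes by default):
#   python notebook_version_standardizer.py standardize
#   python notebook_version_standardizer.py check-execution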
| 5,577
| 31.811765
| 86
|
py
|
featuretools
|
featuretools-main/docs/source/setup.py
|
import os
import featuretools as ft
def load_feature_plots():
es = ft.demo.load_mock_customer(return_entityset=True)
path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"getting_started/graphs/",
)
agg_feat = ft.AggregationFeature(
ft.IdentityFeature(es["sessions"].ww["session_id"]),
"customers",
ft.primitives.Count,
)
trans_feat = ft.TransformFeature(
ft.IdentityFeature(es["customers"].ww["join_date"]),
ft.primitives.TimeSincePrevious,
)
demo_feat = ft.AggregationFeature(
ft.TransformFeature(
ft.IdentityFeature(es["transactions"].ww["transaction_time"]),
ft.primitives.Weekday,
),
"sessions",
ft.primitives.Mode,
)
ft.graph_feature(agg_feat, to_file=os.path.join(path, "agg_feat.dot"))
ft.graph_feature(trans_feat, to_file=os.path.join(path, "trans_feat.dot"))
ft.graph_feature(demo_feat, to_file=os.path.join(path, "demo_feat.dot"))
if __name__ == "__main__":
load_feature_plots()
| 1,071
| 28.777778
| 78
|
py
|
featuretools
|
featuretools-main/docs/source/conf.py
|
# -*- coding: utf-8 -*-
#
# featuretools documentation build configuration file, created by
# sphinx-quickstart on Thu May 19 20:40:30 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import shutil
import subprocess
import sys
from pathlib import Path
import featuretools
# run setup script
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "setup.py")
subprocess.check_call([sys.executable, path])
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../featuretools"))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
"sphinx.ext.ifconfig",
"sphinx.ext.githubpages",
"nbsphinx",
"IPython.sphinxext.ipython_console_highlighting",
"IPython.sphinxext.ipython_directive",
"sphinx.ext.extlinks",
"sphinx.ext.viewcode",
"sphinx.ext.graphviz",
"sphinx_inline_tabs",
"sphinx_copybutton",
"myst_parser",
]
# ipython_mplbackend = None
ipython_execlines = ["import pandas as pd", "pd.set_option('display.width', 1000000)"]
# autosummary_generate=True
autosummary_generate = ["api_reference.rst"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Featuretools"
copyright = "2019, Feature Labs. BSD License"
author = "Feature Labs, Inc."
latex_documents = [
(master_doc, "featuretools.tex", "test Documentation", "test", "manual"),
]
latex_elements = {
"preamble": r"""
\usepackage[utf8]{inputenc}
""",
}
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = featuretools.__version__
# The full version, including alpha/beta/rc tags.
release = featuretools.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["**.ipynb_checkpoints"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "pydata_sphinx_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"pygment_light_style": "tango",
"pygment_dark_style": "native",
"icon_links": [
{
"name": "GitHub",
"url": "https://github.com/alteryx/featuretools",
"icon": "fab fa-github-square",
"type": "fontawesome",
},
{
"name": "Twitter",
"url": "https://twitter.com/AlteryxOSS",
"icon": "fab fa-twitter-square",
"type": "fontawesome",
},
{
"name": "Slack",
"url": "https://join.slack.com/t/alteryx-oss/shared_invite/zt-182tyvuxv-NzIn6eiCEf8TBziuKp0bNA",
"icon": "fab fa-slack",
"type": "fontawesome",
},
{
"name": "StackOverflow",
"url": "https://stackoverflow.com/questions/tagged/featuretools",
"icon": "fab fa-stack-overflow",
"type": "fontawesome",
},
],
"collapse_navigation": False,
"navigation_depth": 2,
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
# html_title = u'featuretools v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/images/featuretools_nav2.svg"
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/images/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
"**": ["globaltoc.html", "relations.html", "sourcelink.html", "searchbox.html"],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "featuretoolsdoc"
# -- Options for Markdown files ----------------------------------------------
myst_admonition_enable = True
myst_deflist_enable = True
myst_heading_anchors = 3
# -- Options for Sphinx Copy Button ------------------------------------------
copybutton_prompt_text = "myinputprompt"
copybutton_prompt_text = r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: "
copybutton_prompt_is_regexp = True
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"featuretools.tex",
"Featuretools Documentation",
"Feature Labs, Inc.",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "featuretools", "featuretools Documentation", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"featuretools",
"featuretools Documentation",
author,
"featuretools",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
nbsphinx_execute = "auto"
extlinks = {
"issue": ("https://github.com/alteryx/featuretools/issues/%s", "GH#%s"),
"pr": ("https://github.com/alteryx/featuretools/pull/%s", "GH#%s"),
"user": ("https://github.com/%s", "@%s"),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
def setup(app):
home_dir = os.environ.get("HOME", "/")
ipython_p = Path(home_dir + "/.ipython/profile_default/startup")
ipython_p.mkdir(parents=True, exist_ok=True)
file_p = os.path.abspath(os.path.dirname(__file__))
shutil.copy(
file_p + "/set-headers.py",
home_dir + "/.ipython/profile_default/startup",
)
app.add_css_file("style.css")
| 13,163
| 31.029197
| 108
|
py
|
featuretools
|
featuretools-main/docs/source/set-headers.py
|
import urllib.request
opener = urllib.request.build_opener()
opener.addheaders = [("Testing", "True")]
urllib.request.install_opener(opener)
| 142
| 22.833333
| 41
|
py
|
trans-bounds-maj-vote
|
trans-bounds-maj-vote-master/self_learning.py
|
import numpy as np
from sklearn.ensemble import RandomForestClassifier
import pyximport;
pyximport.install()
import self_learning_cython as slc
def joint_bayes_risk(margin, pred, i, j, theta, samplingRate=50):
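"""
Compute an upper bound on the joint Bayes risk for the class pair (i, j).
Scans a grid of gamma values in (theta, 1], evaluates
I_ij + max(K_ij + M_theta - M_gamma, 0) / gamma for each, and returns the
infimum, stopping early once the bound has increased over consecutive steps.
"""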
# li = \sum_{x\in X_U} \I{y=i} =approx.= \sum_{x\in X_U} m_Q(x,i)
li = np.sum(margin[:, i])
margins = margin[:, j]
# gammas = sorted(list(set(margins[margins > theta])))
gammas = theta + (1 - theta) * (np.arange(samplingRate) + 1) / samplingRate
infimum = 1e+05
upperBounds = []
# for gamma in gammas:
for n in range(np.size(gammas)):
gamma = gammas[n]
I_ij = np.sum(margin[np.array((margins < gamma) & (margins >= theta)), i]) / li
K_ij = np.dot(margin[:, i], np.array(pred == j) * margins) / li
# M-less of gamma
Mg_ij = np.dot(margin[:, i], np.array(margins < gamma) * margins) / li
# M-less of theta
Mt_ij = np.dot(margin[:, i], np.array(margins < theta) * margins) / li
A = K_ij + Mt_ij - Mg_ij
upperBound = I_ij + (A * (A > 0)) / gamma
upperBounds.append(upperBound)
if upperBound < infimum:
infimum = upperBound
if n > 3:
if upperBounds[-1] > upperBounds[-2] and upperBounds[-2] >= upperBounds[-3]:
break
return infimum
def optimal_threshold_vector(margin, pred, K, samplingRate=50):
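"""
For each class k, scan candidate thresholds between the minimum and maximum
margin for class k and keep the one minimizing the estimated conditional
Bayes error (the Reduction of the joint-bound matrix divided by the
probability of pseudo-labeling), with the same early stopping as above.
"""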
theta = []
def Reduction(matrix, margin):
K = margin.shape[1]
u = margin.shape[0]
countClass = np.array([np.sum(margin[:, j]) for j in range(K)])
return (1 / u) * np.dot(countClass, np.sum(matrix, axis=1))
u = margin.shape[0]
for k in range(K):
# A set of possible thetas:
theta_min = np.min(margin[:, k])
theta_max = np.max(margin[:, k])
thetas = theta_min + np.arange(samplingRate) * (theta_max - theta_min) / samplingRate
JBR = []
BE = []
for n in range(samplingRate):
matrix = np.zeros((K, K))
for i in range(K):
if i == k:
continue
else:
matrix[i, k] = joint_bayes_risk(margin, pred, i, k, thetas[n])
if (i == 0) and (k == 1):
JBR.append(matrix[i, k])
pbl = (1 / u) * np.sum((margin[:, k] >= thetas[n]) & (pred == k))
if pbl == 0:
pbl = 1e-15
BE.append(Reduction(matrix, margin)/pbl)
if n > 3:
if BE[-1] > BE[-2] and BE[-2] >= BE[-3]:
break
BE = np.array(BE)
num = np.argmin(BE)
if type(num) is list:
num = num[0]
theta.append(thetas[num])
return np.array(theta)
def msla(x_l, y_l, x_u, cython=True, **kwargs):
"""
A margin-based self-learning algorithm.
:param x_l: Labeled observations.
:param y_l: Labels.
:param x_u: Unlabeled data. Will be used for learning.
:param cython: Whether to use the Cython implementation, which speeds up computation. Defaults to True.
:return: The final classification model H that has been trained on (x_l, y_l)
and pseudo-labeled (x_u, yPred)
"""
if 'n_estimators' not in kwargs:
n_est = 200
else:
n_est = kwargs['n_estimators']
if 'random_state' not in kwargs:
rand_state = None
else:
rand_state = kwargs['random_state']
classifier = RandomForestClassifier(n_estimators=n_est, oob_score=True, n_jobs=-1, random_state=rand_state)
l = x_l.shape[0]
sample_distr = np.repeat(1 / l, l)
K = np.unique(y_l).shape[0]
b = True
thetas = []
while b:
u = x_u.shape[0]
# Learn a classifier
H = classifier
H.fit(x_l, y_l, sample_weight=sample_distr)
margin_u = H.predict_proba(x_u)
pred_u = np.argmax(margin_u, axis=1)
# Find a threshold minimizing Bayes conditional error
if cython:
theta = slc.c_optimal_threshold_vector(margin_u, pred_u, K)
else:
theta = optimal_threshold_vector(margin_u, pred_u, K)
thetas.append(theta)
# Select observations with argmax margin more than corresponding theta
selection = np.array(margin_u[np.arange(u), pred_u] >= theta[pred_u])
x_s = x_u[selection, :]
y_s = pred_u[selection]
# Stop if there is nothing to add:
if x_s.shape[0] == 0:
b = False
continue
# Move them from the unlabeled set to the train one
x_l = np.concatenate((x_l, x_s))
y_l = np.concatenate((y_l, y_s))
x_u = np.delete(x_u, np.where(selection), axis=0)
s = x_l.shape[0] - l
sample_distr = np.concatenate((np.repeat(1 / l, l), np.repeat(1 / s, s)))
# Stop criterion
if x_u.shape[0] == 0:
b = False
return H, thetas
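# Minimal usage sketch (mirroring simple_test.py; assumes numpy arrays
# x_l, y_l, x_u are already prepared):
#
#   model, thetas = msla(x_l, y_l, x_u, cython=False, random_state=0)
#   y_pred = model.predict(x_u)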
def fsla(x_l, y_l, x_u, theta, max_iter, **kwargs):
"""
A margin-based self-learning algorithm with a fixed threshold.
:param x_l: Labeled observations.
:param y_l: Labels.
:param x_u: Unlabeled data. Will be used for learning.
:param theta: A fixed margin threshold used at every iteration.
:param max_iter: A maximum number of iterations that self-learning does.
:return: The final classification model H that has been trained on (x_l, y_l)
and pseudo-labeled (x_u, yPred)
"""
if 'n_estimators' not in kwargs:
n_est = 200
else:
n_est = kwargs['n_estimators']
if 'random_state' not in kwargs:
rand_state = None
else:
rand_state = kwargs['random_state']
classifier = RandomForestClassifier(n_estimators=n_est, oob_score=True, n_jobs=-1, random_state=rand_state)
l = x_l.shape[0]
sample_distr = np.repeat(1 / l, l)
n = 1
b = True
while b:
u = x_u.shape[0]
# Learn a classifier
H = classifier
H.fit(x_l, y_l, sample_weight=sample_distr)
margin_u = H.predict_proba(x_u)
pred_u = np.argmax(margin_u, axis=1)
# Select observations with argmax margin more than corresponding theta
selection = np.array(margin_u[np.arange(u), pred_u] >= theta)
x_s = x_u[selection, :]
y_s = pred_u[selection]
# Move them from the unlabeled set to the train one
x_l = np.concatenate((x_l, x_s))
y_l = np.concatenate((y_l, y_s))
x_u = np.delete(x_u, np.where(selection), axis=0)
s = x_l.shape[0] - l
if x_s.shape[0] == 0:
b = False
continue
sample_distr = np.concatenate((np.repeat(1 / l, l), np.repeat(1 / s, s)))
# Stop criterion
if x_u.shape[0] == 0:
b = False
n += 1
if n == max_iter:
b = False
return H
| 6,771
| 32.86
| 117
|
py
|
trans-bounds-maj-vote
|
trans-bounds-maj-vote-master/tsvm.py
|
import numpy as np
import pandas as pd
import subprocess
import os
from sklearn.datasets import dump_svmlight_file
import aux_functions as af
def create_folders(db_name, num_exp):
# os.makedirs with exist_ok=True creates any missing parent directories and
# ignores ones that already exist, replacing the repeated try/except blocks
if num_exp is not None:
subpath = db_name + "/tsvm/" + str(num_exp)
else:
subpath = db_name + "/tsvm"
for folder in ("files", "models", "predictions"):
os.makedirs("output/" + subpath + "/" + folder, exist_ok=True)
def get_pred_values(path):
y = pd.read_csv(path, sep=' ', header=None)
y = y[0]
y = y.apply(lambda x: int(str.split(x, ':')[1]), 1)
return y
def ova_tsvm(x_l, y_l, x_u, y_u, db_name="tmp", num_exp=None, timeout=None):
K = len(np.unique(y_l))
x_train, y_train, y_u_shuffled = af.partially_labeled_view(x_l, y_l, x_u, y_u)
# create folders for storing results
create_folders(db_name, num_exp)
if num_exp is not None:
subpath = db_name + "/tsvm/" + str(num_exp)
else:
subpath = db_name + "/tsvm"
ovapreds = []
for k in range(K):
y_train_k = np.array(list(map(lambda label: 1 if label == k else (0 if label == -1 else -1), y_train)))
path_file = "output/" + subpath + "/files/df_class_" + str(k)
path_model = "output/" + subpath + "/models/model_class_" + str(k)
path_prediction = "output/" + subpath + "/predictions/pred_class_" + str(k)
open(path_file, 'a').close()
dump_svmlight_file(x_train, y_train_k, path_file, zero_based=False)
# form a command to launch the TSVM learning process
cmd = form_cmd(path_file, path_model, path_prediction)
try:
subprocess.run(cmd, stdout=subprocess.PIPE, timeout=timeout)
ovapreds.append(get_pred_values(path_prediction))
except subprocess.TimeoutExpired:
raise TimeoutError("The algorithm has not converged!")
ovapreds = np.array(ovapreds).T
y_pred = np.apply_along_axis(ova_voting, 1, ovapreds)
return y_u_shuffled, y_pred
def ova_voting(preds_for_x):
inds = [idx for idx in range(len(preds_for_x)) if preds_for_x[idx] == 1]
if inds == []:
return np.random.choice(np.arange(len(preds_for_x)), 1)[0]
else:
return np.random.choice(inds, 1)[0]
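# Tie-breaking above is randomized: among the one-vs-all classifiers voting
# positive, one class is drawn uniformly at random; if none vote positive, a
# class is drawn uniformly from all classes.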
def form_cmd(path_file, path_model, path_prediction):
cmd = list()
# binary for learning
cmd.append('./svm_light/svm_learn')
# these options can be tried
# cmd.append('-n')
# cmd.append('5')
# cmd.append('-e')
# cmd.append('0.01')
# classify unlabelled examples to the following file
cmd.append('-l')
cmd.append(path_prediction)
# data path
cmd.append(path_file)
# a file for the learning model
cmd.append(path_model)
return cmd
| 3,258
| 30.038095
| 111
|
py
|
trans-bounds-maj-vote
|
trans-bounds-maj-vote-master/experiment_test.py
|
# classifiers
from sklearn.semi_supervised import LabelPropagation
from sklearn.ensemble import RandomForestClassifier
import tsvm
import self_learning as sl
# auxiliary functions
from aux_functions import ReadDataset, partially_labeled_view
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score
# other packages
import numpy as np
import time
import sys
import warnings
warnings.filterwarnings("ignore")
def experiment_test(x, y, db_name, unlab_size, N=20):
# rf, ls, ova_tsvm, fsla, msla result matrices:
# 1st component: experiment number
# 2nd component: 0 - accuracy score, 1 - f1 score, 2 - runtime
rf = np.zeros((N, 3))
msla = np.zeros((N, 3))
fsla = np.zeros((N, 3))
ls = np.zeros((N, 3))
ova_tsvm = np.zeros((N, 3))
for n in range(N):
# split on labeled and unlabeled parts
x_l, x_u, y_l, y_u = train_test_split(x, y, test_size=unlab_size, random_state=n * 10)
# display information about data split for the first iteration:
if n == 0:
print("data split for the first iteration:")
print("shape of labeled part:")
print(x_l.shape, y_l.shape)
print("shape of unlabeled part:")
print(x_u.shape, y_u.shape)
print("class distribution of labeled examples:")
print([np.sum(y_l == i) for i in range(len(np.unique(y)))])
print("class distribution of unlabeled examples:")
print([np.sum(y_u == i) for i in range(len(np.unique(y)))])
print()
# partially labeled view
x_train, y_train, y_u_shuffled = partially_labeled_view(x_l, y_l, x_u, y_u)
# purely supervised classification
model = RandomForestClassifier(n_estimators=200, oob_score=True, n_jobs=-1, random_state=n * 10)
t0 = time.time()
model.fit(x_l, y_l)
y_pred = model.predict(x_u)
t1 = time.time()
print("random forest is finished, experiment", n)
rf[n, 0] = accuracy_score(y_u, y_pred)
rf[n, 1] = f1_score(y_u, y_pred, average="weighted")
rf[n, 2] = t1 - t0
# label propagation
t0 = time.time()
label_prop_model = LabelPropagation(gamma=0.01, n_jobs=-1, tol=1e-3)
label_prop_model.fit(x_train, y_train)
y_pred = label_prop_model.predict(x_train[y_train == -1, :])
t1 = time.time()
print("label propagation is finished, experiment", n)
ls[n, 0] = accuracy_score(y_u_shuffled, y_pred)
ls[n, 1] = f1_score(y_u_shuffled, y_pred, average="weighted")
ls[n, 2] = t1 - t0
# tsvm
t0 = time.time()
y_u_shuffled, y_pred = tsvm.ova_tsvm(x_l, y_l, x_u, y_u, db_name=db_name, num_exp=n, timeout=None)
t1 = time.time()
print("tsvm is finished, experiment", n)
ova_tsvm[n, 0] = accuracy_score(y_u_shuffled, y_pred)
ova_tsvm[n, 1] = f1_score(y_u_shuffled, y_pred, average="weighted")
ova_tsvm[n, 2] = t1 - t0
# multi-class self-learning algorithm with fixed theta
theta = 0.7
max_iter = 10
t0 = time.time()
model = sl.fsla(x_l, y_l, x_u, theta, max_iter, random_state=n * 10)
y_pred = model.predict(x_u)
t1 = time.time()
print("fsla is finished, experiment", n)
fsla[n, 0] = accuracy_score(y_u, y_pred)
fsla[n, 1] = f1_score(y_u, y_pred, average="weighted")
fsla[n, 2] = t1 - t0
# multi-class self-learning algorithm
t0 = time.time()
model, thetas = sl.msla(x_l, y_l, x_u, random_state=n * 10)
y_pred = model.predict(x_u)
t1 = time.time()
print("msla is finished, experiment", n)
msla[n, 0] = accuracy_score(y_u, y_pred)
msla[n, 1] = f1_score(y_u, y_pred, average="weighted")
msla[n, 2] = t1 - t0
print("experiment", n, "is done")
acc = np.vstack((
rf[:, 0],
ls[:, 0],
ova_tsvm[:, 0],
fsla[:, 0],
msla[:, 0]
)).T
f1 = np.vstack((
rf[:, 1],
ls[:, 1],
ova_tsvm[:, 1],
fsla[:, 1],
msla[:, 1]
)).T
acc_mean = np.mean(acc, axis=0)
acc_std = np.std(acc, axis=0)
f1_mean = np.mean(f1, axis=0)
f1_std = np.std(f1, axis=0)
np.savetxt("output/" + db_name + '/acc_mean.txt', np.round(acc_mean, 4))
np.savetxt("output/" + db_name + '/acc_std.txt', np.round(acc_std, 4))
np.savetxt("output/" + db_name + '/acc_full.txt', np.round(acc, 4))
np.savetxt("output/" + db_name + '/f1_mean.txt', np.round(f1_mean, 4))
np.savetxt("output/" + db_name + '/f1_std.txt', np.round(f1_std, 4))
np.savetxt("output/" + db_name + '/f1_full.txt', np.round(f1, 4))
if __name__ == '__main__':
arguments = sys.argv[1:]
database = arguments[0]
split = float(arguments[1])
read_data = ReadDataset()
x, y = read_data.read(database)
experiment_test(x, y, db_name=database, unlab_size=split, N=1)
| 5,008
| 35.297101
| 106
|
py
|
trans-bounds-maj-vote
|
trans-bounds-maj-vote-master/simple_test.py
|
# classifiers
from sklearn.semi_supervised import LabelPropagation
from sklearn.ensemble import RandomForestClassifier
import tsvm
import self_learning as sl
# auxiliary functions
from aux_functions import ReadDataset, partially_labeled_view
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score
# other packages
import matplotlib.pyplot as plt
import numpy as np
import time
import warnings
warnings.filterwarnings("ignore")
def plot_graph(acc, f1):
plt.subplots()
index = np.arange(5)
bar_width = 0.35
opacity = 0.8
plt.bar(index, acc, bar_width,
alpha=opacity,
color='b',
label='ACC')
plt.bar(index + bar_width, f1, bar_width,
alpha=opacity,
color='r',
label='F1')
plt.xlabel('Method')
plt.ylabel('Score')
plt.title('Performance Results')
plt.xticks(index + bar_width, ('RF', 'LS', 'TSVM', 'FSLA', 'MSLA'))
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.tight_layout()
plt.show()
def simple_test():
# read and split data
read_data = ReadDataset()
x, y = read_data.read("dna")
x_l, x_u, y_l, y_u = train_test_split(x, y, test_size=0.99, random_state=40)
print("shape of labeled part:")
print(x_l.shape, y_l.shape)
print("shape of unlabeled part:")
print(x_u.shape, y_u.shape)
print("class distribution of labeled examples:")
print([np.sum(y_l == i) for i in range(len(np.unique(y)))])
print("class distribution of unlabeled examples:")
print([np.sum(y_u == i) for i in range(len(np.unique(y)))])
print()
# partially labeled view
x_train, y_train, y_u_shuffled = partially_labeled_view(x_l, y_l, x_u, y_u)
# purely supervised classification
print("random forest:")
t0 = time.time()
model = RandomForestClassifier(n_estimators=200, oob_score=True, n_jobs=-1, random_state=40)
model.fit(x_l, y_l)
y_pred = model.predict(x_u)
acc = [accuracy_score(y_u, y_pred)]
f1 = [f1_score(y_u, y_pred, average="weighted")]
print("accuracy:", acc[0])
print("f1-score:", f1[0])
t1 = time.time()
print("random forest is done")
print("time:", t1-t0, "seconds")
print()
# label propagation
print("label propagation:")
t0 = time.time()
label_prop_model = LabelPropagation(gamma=0.01, n_jobs=-1, tol=1e-3)
label_prop_model.fit(x_train, y_train)
y_pred = label_prop_model.predict(x_train[y_train == -1, :])
acc.append(accuracy_score(y_u_shuffled, y_pred))
f1.append(f1_score(y_u_shuffled, y_pred, average="weighted"))
print("accuracy:", acc[1])
print("f1-score:", f1[1])
t1 = time.time()
print("label propagation is done!")
print("time:", t1 - t0, "seconds")
print()
# tsvm
print("tsvm:")
t0 = time.time()
y_u_shuffled, y_pred = tsvm.ova_tsvm(x_l, y_l, x_u, y_u, db_name="dna", timeout=None)
acc.append(accuracy_score(y_u_shuffled, y_pred))
f1.append(f1_score(y_u_shuffled, y_pred, average="weighted"))
print("accuracy:", acc[2])
print("f1-score:", f1[2])
t1 = time.time()
print("tsvm is done!")
print("time:", t1 - t0, "seconds")
# multi-class self-learning algorithm with fixed theta
theta = 0.7
max_iter = 10
print("fsla with theta={}:".format(theta))
t0 = time.time()
model = sl.fsla(x_l, y_l, x_u, theta, max_iter, random_state=40)
y_pred = model.predict(x_u)
acc.append(accuracy_score(y_u, y_pred))
f1.append(f1_score(y_u, y_pred, average="weighted"))
print("accuracy:", acc[3])
print("f1-score:", f1[3])
t1 = time.time()
print("fsla is done!")
print("time:", t1-t0, "seconds")
print()
# multi-class self-learning algorithm
print("msla:")
t0 = time.time()
model, thetas = sl.msla(x_l, y_l, x_u, random_state=40)
y_pred = model.predict(x_u)
print("optimal theta at each step:")
print(thetas)
acc.append(accuracy_score(y_u, y_pred))
f1.append(f1_score(y_u, y_pred, average="weighted"))
print("accuracy:", acc[4])
print("f1-score:", f1[4])
t1 = time.time()
print("msla is done!")
print("time:", t1-t0, "seconds")
print()
# plot a graph
plot_graph(acc, f1)
if __name__ == '__main__':
simple_test()
| 4,337
| 29.765957
| 96
|
py
|
trans-bounds-maj-vote
|
trans-bounds-maj-vote-master/aux_functions.py
|
import numpy as np
from sklearn.datasets import load_svmlight_file
class ReadDataset:
"""
A class to read different datasets in numpy.ndarray format.
1. DNA Data Set: https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html
2. Vowel Data Set: https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html
3. Pendigits Data Set: https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html
4. MNIST Data Set: https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html
5. SensIT Vehicle Data Set: https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html
Note: only 'dna' and 'pendigits' are currently wired up below; the other readers are commented out.
"""
def __init__(self):
self._datasets = {
'dna': _read_dna,
# 'mnist': _read_mnist,
'pendigits': _read_pendigits,
# 'vehicle': _read_vehicle,
# 'vowel': _read_vowel
}
def read(self, name):
if name in self._datasets:
return self._datasets[name]()
else:
raise KeyError("There is no dataset with this name. Check description of ReadDataset")
def _read_dna():
df1 = load_svmlight_file("data/dna.scale")
x1 = df1[0].todense()
y1 = df1[1]
df2 = load_svmlight_file("data/dna.scale.test")
x2 = df2[0].todense()
y2 = df2[1]
x = np.concatenate((x1, x2))
y = np.concatenate((y1, y2))
# label transform to 0..K-1
y -= 1
return x, y
def _read_pendigits():
df = load_svmlight_file("data/pendigits")
x = df[0].todense()
y = df[1]
return x, y
def partially_labeled_view(x_l, y_l, x_u, y_u):
y_undefined = np.repeat(-1, np.shape(x_u)[0])
y_train = np.concatenate((y_l, y_undefined))
x_train = np.concatenate((x_l, x_u))
y_true = np.concatenate((y_l, y_u))
n = np.size(y_train)
# Shuffle observations
shuffle = np.random.choice(np.arange(n), n, replace=False)
y_train = y_train[shuffle]
x_train = x_train[shuffle]
y_true = y_true[shuffle]
y_true_unlab = y_true[y_train == -1]
return x_train, y_train, y_true_unlab
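# Minimal sketch (not part of the original module) of what partially_labeled_view
# returns: pseudo-labels of -1 mark the unlabeled part after shuffling, and
# y_true_unlab keeps the ground truth of exactly those rows.
def _demo_partially_labeled_view():
    rng = np.random.RandomState(0)
    x_l, y_l = rng.rand(4, 2), np.array([0, 1, 0, 1])
    x_u, y_u = rng.rand(6, 2), np.array([1, 0, 1, 1, 0, 0])
    x_train, y_train, y_true_unlab = partially_labeled_view(x_l, y_l, x_u, y_u)
    assert x_train.shape == (10, 2)
    assert np.sum(y_train == -1) == 6      # six rows are marked unlabeled
    assert y_true_unlab.shape == (6,)      # their hidden true labels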
| 2,087
| 31.625
| 103
|
py
|
trans-bounds-maj-vote
|
trans-bounds-maj-vote-master/new-module/pseudo_label_policy.py
|
import numpy as np
# import pyximport; pyximport.install(setup_args={"include_dirs": np.get_include()},
# language_level="3", reload_support=True)
from .self_learning_cython import c_optimal_threshold_vector
class Policy:
def __init__(self):
self.returned_subset = np.array([])
def choose(self, log):
return self.returned_subset
class RandomSubset(Policy):
def __init__(self, num_take=0.1):
super().__init__()
self.num_take = num_take
self._validate_num_take()
def _validate_num_take(self):
if np.isreal(self.num_take):
if self.num_take <= 0:
raise KeyError("The parameter num_take must be a positive number.")
if self.num_take >= 1 and self.num_take != int(self.num_take):
raise KeyError("The parameter num_take must be either a float within [0,1) or an integer >= 1.")
else:
raise KeyError("The parameter num_take must be either a float within [0,1) or an integer >= 1.")
def choose(self, log):
x_u = log.current_iteration["x_u"]
u = x_u.shape[0]
if type(self.num_take) == float:
n_take = int(self.num_take * u)
else:
n_take = self.num_take
self.returned_subset = np.random.choice(np.arange(u), n_take, replace=False)
class MostConfident(Policy):
def __init__(self, num_take=0.1):
super().__init__()
self.num_take = num_take
self._validate_num_take()
def _validate_num_take(self):
if np.isreal(self.num_take):
if self.num_take <= 0:
raise KeyError("The parameter num_take must be a positive number.")
if self.num_take >= 1 and self.num_take != int(self.num_take):
raise KeyError("The parameter num_take must be either a float within [0,1) or an integer >= 1.")
else:
raise KeyError("The parameter num_take must be either a float within [0,1) or an integer >= 1.")
def choose(self, log):
x_u = log.current_iteration["x_u"]
estimator = log.current_iteration["estimator"]
prediction_vote_u = estimator.predict_proba(x_u).max(axis=1)
u = x_u.shape[0]
if type(self.num_take) == float:
n_take = int(self.num_take * u)
else:
n_take = self.num_take
self.returned_subset = np.argsort(prediction_vote_u)[::-1][:n_take]
class ConfidenceThreshold(Policy):
def __init__(self, theta=None):
super().__init__()
if theta is None:
self.theta = 1
else:
self.theta = theta
def choose(self, log):
y_l = log.current_iteration["y_l"]
K = np.unique(y_l).size
x_u = log.current_iteration["x_u"]
estimator = log.current_iteration["estimator"]
vote_u = estimator.predict_proba(x_u)
y_pred_u = vote_u.argmax(axis=1)
u = x_u.shape[0]
is_theta_vector = self._validate_theta(K)
if is_theta_vector:
self.returned_subset = np.where(vote_u[np.arange(u), y_pred_u] >= self.theta[y_pred_u])[0]
else:
self.returned_subset = np.where(vote_u[np.arange(u), y_pred_u] >= self.theta)[0]
def _validate_theta(self, K):
if type(self.theta) in [float, int]:
if not (0 <= self.theta <= 1):
raise KeyError("theta's value(s) must lie within the interval [0,1].")
else:
return False
elif type(self.theta) == np.ndarray or type(self.theta) == list:
self.theta = np.array(self.theta)
if not np.all(np.logical_and(self.theta >= 0, self.theta <= 1)):
raise KeyError("theta's value(s) must lie within the interval [0,1].")
elif self.theta.size == K:
return True
else:
raise KeyError("theta must be either a scalar or a vector of size K.")
class MeanPredictionVote(Policy):
def __init__(self):
super().__init__()
self.theta = None
def choose(self, log):
x_u = log.current_iteration["x_u"]
estimator = log.current_iteration["estimator"]
prediction_vote_u = estimator.predict_proba(x_u).max(axis=1)
self.theta = np.mean(prediction_vote_u)
base_policy = ConfidenceThreshold(theta=self.theta)
base_policy.choose(log)
self.returned_subset = base_policy.returned_subset
class TransductiveConditionalError(Policy):
def __init__(self, cython=True, sup_prob=False, worst_prob=False, fixed_prob=None):
super().__init__()
self.cython = cython
self.sup_prob = sup_prob
self.worst_prob = worst_prob
self.fixed_prob = fixed_prob
self.theta = None
def choose(self, log):
x_u = log.current_iteration["x_u"]
x_l = log.current_iteration["x_l"]
y_l = log.current_iteration["y_l"]
estimator = log.current_iteration["estimator"]
K = np.unique(y_l).size
vote_u = estimator.predict_proba(x_u)
if self.sup_prob:
if len(log.previous_iterations) == 0:
prob_u = vote_u
else:
prob_u = log.previous_iterations[0]['estimator'].predict_proba(x_u)
elif self.worst_prob:
prob_u = np.full(vote_u.shape, 1/K)
elif self.fixed_prob is not None:
prob_u = self.fixed_prob
else:
prob_u = vote_u
y_pred_u = vote_u.argmax(axis=1)
if self.cython:
self.theta = c_optimal_threshold_vector(vote_u, prob_u, y_pred_u, K)
else:
self.theta = _optimal_threshold_vector(vote_u, prob_u, y_pred_u, K)
base_policy = ConfidenceThreshold(theta=self.theta)
base_policy.choose(log)
self.returned_subset = base_policy.returned_subset
class CurriculumLearning(Policy):
def __init__(self, curriculum_step=0.2):
super().__init__()
self.curriculum_step = curriculum_step
self.theta = None
def choose(self, log):
x_u = log.current_iteration["x_u"]
estimator = log.current_iteration["estimator"]
vote_u = estimator.predict_proba(x_u)
quantile = 1 - (log.idx_iter + 1) * self.curriculum_step
if quantile < self.curriculum_step:
quantile = 0
self.theta = np.quantile(vote_u.max(axis=1), quantile)
base_policy = ConfidenceThreshold(theta=self.theta)
base_policy.choose(log)
self.returned_subset = base_policy.returned_subset
def _joint_bayes_risk(vote, prob, pred, i, j, theta, sampling_rate=50):
# ui = \sum_{x\in X_U} \I{y=i} =approx.= \sum_{x\in X_U} m_Q(x,i)
ui = np.sum(vote[:, i])
vote_j = vote[:, j]
prob_i = prob[:, i]
# gammas = sorted(list(set(vote_j[vote_j > theta])))
gammas = theta + (1 - theta) * (np.arange(sampling_rate) + 1) / sampling_rate
infimum = 1e+05
upper_bounds = []
# for gamma in gammas:
for n in range(np.size(gammas)):
gamma = gammas[n]
I_ij = np.sum(prob_i[np.array((vote_j < gamma) & (vote_j >= theta))]) / ui
K_ij = np.dot(prob_i, np.array(pred == j) * vote_j) / ui
# M-less of gamma
Mg_ij = np.dot(prob_i, np.array(vote_j < gamma) * vote_j) / ui
# M-less of theta
Mt_ij = np.dot(prob_i, np.array(vote_j < theta) * vote_j) / ui
A = K_ij + Mt_ij - Mg_ij
upper_bound = I_ij + (A * (A > 0)) / gamma
upper_bounds.append(upper_bound)
if upper_bound < infimum:
infimum = upper_bound
if n > 3:
if upper_bounds[-1] > upper_bounds[-2] >= upper_bounds[-3]:
break
return infimum
def _optimal_threshold_vector(vote, prob, pred, K, sampling_rate=50):
theta = []
def reduction(matrix, prob):
u, K = prob.shape
count_class = np.array([np.sum(prob[:, j]) for j in range(K)])
return (1 / u) * np.dot(count_class, np.sum(matrix, axis=1))
u = vote.shape[0]
for k in range(K):
# A set of possible thetas:
theta_min = np.min(vote[:, k])
theta_max = np.max(vote[:, k])
thetas = theta_min + np.arange(sampling_rate) * (theta_max - theta_min) / sampling_rate
JBR = []
BE = []
for n in range(sampling_rate):
matrix = np.zeros((K, K))
for i in range(K):
if i == k:
continue
else:
matrix[i, k] = _joint_bayes_risk(vote, prob, pred, i, k, thetas[n])
if (i == 0) and (k == 1):
JBR.append(matrix[i, k])
pbl = (1 / u) * np.sum((vote[:, k] >= thetas[n]) & (pred == k))
if pbl == 0:
pbl = 1e-15
BE.append(reduction(matrix, prob)/pbl)
if n > 3:
if BE[-1] > BE[-2] >= BE[-3]:
break
BE = np.array(BE)
num = np.argmin(BE)
if type(num) is list:
num = num[0]
theta.append(thetas[num])
return np.array(theta)
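# Rough usage sketch for the pure-Python threshold search above (the Cython
# variant c_optimal_threshold_vector takes the same arguments). The vote matrix
# is any row-stochastic matrix; here one is fabricated for illustration.
def _demo_optimal_threshold_vector():
    rng = np.random.RandomState(0)
    raw = rng.rand(50, 3)
    vote = raw / raw.sum(axis=1, keepdims=True)   # 50 unlabeled points, 3 classes
    pred = vote.argmax(axis=1)
    theta = _optimal_threshold_vector(vote, vote, pred, K=3)
    assert theta.shape == (3,)                    # one learned threshold per class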
| 9,110
| 36.187755
| 112
|
py
|
trans-bounds-maj-vote
|
trans-bounds-maj-vote-master/new-module/self_learning.py
|
from .pseudo_label_policy import *
import numpy as np
from copy import deepcopy
import warnings
class SelfLearning:
def __init__(self, base_estimator=None, policy='confidence', voting='soft', theta='auto', cython=True,
sup_prob=True, worst_prob=False, fixed_prob=None, num_take=None, decreased_pl_weights=True,
max_iter=None, restart=False, curriculum_step=0.2, semisup_base=False, random_state=None):
# TODO: option to enforce pseudo-labeling all unlab. examples; shuffle unlab. examples back
self.policy = policy
self.num_take = num_take
# TODO: voting is not used
self.voting = voting
self.theta = theta
self.cython = cython
self.sup_prob = sup_prob
self.worst_prob = worst_prob
self.fixed_prob = fixed_prob
self.max_iter = max_iter
if self.max_iter is not None:
if type(self.max_iter) != int:
raise KeyError("max_iter must be either None or a strictly positive integer")
elif self.max_iter < 1:
raise KeyError("max_iter must be either None or a strictly positive integer")
self.restart = restart
self.curriculum_step = curriculum_step
if self.restart and self.max_iter is None and not (isinstance(self.theta, str) and "cur" in self.theta):
raise KeyError("max_iter must be set to a strictly positive integer if restart is True "
"and if it is not curriculum learning")
self._initialize_policy_()
self.decreased_pl_weights = decreased_pl_weights
self.semisup_base = semisup_base
self.random_state = random_state
# validate base_estimator
self.base_estimator = base_estimator
self._validate_base_estimator()
# initialize self.base_estimator_
self._initialize_base_estimator_()
self.init_classifier = None
self.final_classifier = None
self.log_all_iterations = None
self.x_pl = None
self.y_pl = None
def fit(self, x_l, y_l, x_u):
"""
:param x_l: Labeled training observations
:param y_l: Labels
:param x_u: Unlabeled training observations
"""
if self.random_state is not None:
np.random.seed(self.random_state)
l = x_l.shape[0]
u = x_u.shape[0]
# initialize log to store basic info about each iteration of self-learning
log = LogStorer(x_l, y_l, x_u)
cond = True
it = 0
idx_u = np.arange(u)
sample_weight = None
# initialization: supervised model trained on labeled examples only
estimator = deepcopy(self.base_estimator_)
if self.semisup_base:
estimator.fit(x_l, y_l, x_u)
else:
estimator.fit(x_l, y_l)
log.update(estimator, x_l, y_l, x_u, [], None)
# start of self-learning
while cond:
it += 1
# choose examples to pseudo-label
self.policy_.choose(log=log)
selection = self.policy_.returned_subset
# stop if there is nothing to add:
if selection.size == 0:
cond = False
continue
# select the examples and pseudo-label them
x_s = x_u[selection, :]
y_s = estimator.predict(x_s)
idx_s = idx_u[selection]
if self.restart:
# move them from the unlabeled set to the labeled one
x_l = np.concatenate((x_l[:l], x_s))
y_l = np.concatenate((y_l[:l], y_s))
idx_pl = idx_s
else:
# move them from the unlabeled set to the labeled one
x_l = np.concatenate((x_l, x_s))
y_l = np.concatenate((y_l, y_s))
idx_pl = np.concatenate((log.current_iteration['idx_pl'], idx_s))
x_u = np.delete(x_u, selection, axis=0)
idx_u = np.delete(idx_u, selection)
if self.theta is None:
theta = None
else:
theta = self.policy_.theta
# if True, the weight of pseudo-labeled examples is decreased
if self.decreased_pl_weights:
u_pl = x_l.shape[0] - l
sample_weight = np.concatenate((np.repeat(1 / l, l), np.repeat(1 / u_pl, u_pl)))
# learn a new classifier
estimator = deepcopy(self.base_estimator_)
if self.semisup_base:
estimator.fit(x_l, y_l, x_u, sample_weight=sample_weight)
else:
if sample_weight is None:
estimator.fit(x_l, y_l)
else:
estimator.fit(x_l, y_l, sample_weight=sample_weight)
# update log
log.update(estimator, x_l, y_l, x_u, idx_pl, theta)
# stop if max_iter is reached
if it == self.max_iter:
cond = False
# stop if all unlabeled examples are pseudo-labeled
if x_l.shape[0] == l + u:
cond = False
self.init_classifier = log.previous_iterations[0]['estimator']
self.final_classifier = log.current_iteration['estimator']
log_all_iterations = log.previous_iterations
log_all_iterations.append(log.current_iteration)
self.log_all_iterations = log_all_iterations
self.x_pl = log.current_iteration['x_l']
self.y_pl = log.current_iteration['y_l']
def predict(self, x, supervised=False):
if supervised:
return self.init_classifier.predict(x)
else:
return self.final_classifier.predict(x)
def predict_proba(self, x, supervised=False):
if supervised:
return self.init_classifier.predict_proba(x)
else:
return self.final_classifier.predict_proba(x)
def _validate_base_estimator(self):
if self.base_estimator is not None:
base_estimator_methods = ['fit', 'predict']
if self.policy == 'confidence':
base_estimator_methods.append('predict_proba')
if not np.all(list(map(lambda method: hasattr(self.base_estimator, method), base_estimator_methods))):
raise KeyError("base_estimator doesn't contain one or any of the following methods: " +
", ".join(base_estimator_methods))
def _initialize_policy_(self):
if self.policy == 'confidence':
if self.num_take is not None:
self.policy_ = MostConfident(num_take=self.num_take)
# theta is enforced to be None if policy_ is MostConfident
self.theta = None
else:
if self.theta == 'mean':
self.policy_ = MeanPredictionVote()
elif self.theta == 'auto':
self.policy_ = TransductiveConditionalError(cython=self.cython, sup_prob=self.sup_prob,
worst_prob=self.worst_prob, fixed_prob=self.fixed_prob)
elif self.theta == 'curriculum':
self.policy_ = CurriculumLearning(curriculum_step=self.curriculum_step)
else:
self.policy_ = ConfidenceThreshold(theta=self.theta)
elif self.policy == 'random':
self.policy_ = RandomSubset(num_take=self.num_take)
# theta is enforced to be None if policy_ is RandomSubset
self.theta = None
else:
# TODO: possibility of custom policy
raise KeyError("policy must be either confidence or random.")
def _initialize_base_estimator_(self):
if self.base_estimator is not None:
self.base_estimator_ = self.base_estimator
self._agree_random_state()
# by default, base_estimator_ is a random forest
else:
from sklearn.ensemble import RandomForestClassifier
self.base_estimator_ = RandomForestClassifier(random_state=self.random_state)
def _agree_random_state(self):
# if base_estimator_ has random_state attribute
if hasattr(self.base_estimator_, 'random_state'):
# and if random_state is not default,
# replace the rs of base_estimator_ by random_state
if self.random_state is not None:
# warn if base_estimator_'s own random_state was initially set to a different non-None value
if self.random_state != self.base_estimator_.random_state is not None:
warnings.warn("random state of base_estimator_ is set to " + str(self.random_state))
self.base_estimator_.random_state = self.random_state
class LogStorer:
def __init__(self, x_l, y_l, x_u):
self.x_l = x_l
self.y_l = y_l
self.x_u = x_u
self.current_iteration = None
self.previous_iterations = list()
self.idx_iter = 0
def update(self, estimator, x_l, y_l, x_u, idx_pl, theta=None):
if self.current_iteration is not None:
self.previous_iterations.append(self.current_iteration)
self.current_iteration = {
"estimator": estimator,
"x_l": x_l,
"y_l": y_l,
"x_u": x_u,
"idx_pl": idx_pl,
"theta": theta,
}
self.idx_iter += 1
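# Minimal end-to-end sketch (illustrative only, assuming the package imports
# resolve): fit SelfLearning with its default random-forest base estimator on a
# synthetic split and predict on the unlabeled part.
def _demo_self_learning():
    from sklearn.datasets import make_classification
    x, y = make_classification(n_samples=200, n_classes=3, n_informative=5,
                               random_state=0)
    x_l, y_l, x_u = x[:40], y[:40], x[40:]        # 40 labeled, 160 unlabeled
    model = SelfLearning(theta=0.7, max_iter=5, random_state=0)
    model.fit(x_l, y_l, x_u)
    assert model.predict(x_u).shape == (160,)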
| 9,772
| 43.221719
| 119
|
py
|
trans-bounds-maj-vote
|
trans-bounds-maj-vote-master/new-module/setup.py
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
import numpy as np
extensions = [
Extension("self_learning_cython", sources=["self_learning_cython.pyx"], libraries=["m"],
include_dirs=[np.get_include()], extra_compile_args=["-O3", "-ffast-math", "-march=native", "-fopenmp"],
extra_link_args=['-fopenmp'], reload_support=True)
]
setup(
name="probabilistic_classifier_cython", cmdclass={"build_ext": build_ext},
ext_modules=cythonize(extensions, language_level="3"),
)
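# Typical in-place build (a sketch; assumes Cython and a C compiler with
# OpenMP support are available):
#     python setup.py build_ext --inplace
# This compiles self_learning_cython.pyx into the extension module that
# pseudo_label_policy.py imports.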
| 611
| 35
| 118
|
py
|
trans-bounds-maj-vote
|
trans-bounds-maj-vote-master/new-module/__init__.py
|
# -*- coding: utf-8 -*-
"""Self-learning module."""
__author__ = """Vasilii Feofanov"""
__email__ = 'vasilii.feofanov@gmail.com'
__version__ = '0.1.0'
from .self_learning import SelfLearning
__all__ = ["SelfLearning"]
| 222
| 17.583333
| 40
|
py
|
scientific-re
|
scientific-re-main/main.py
|
import os
import random
import torch
import numpy
import torch.backends.cudnn
from src.data.prepare_data import Preparedata
from src.model.CNN import CNN
from src.model.run import Run
from src.parameters.parameters import Parameters
class Controller(Parameters):
def __init__(self):
# prepare the data
self.data = Preparedata(Parameters)
self.data.prepare_data(Parameters)
def initialise_model(self):
# prepare the model
self.model = CNN(Parameters)
def train(self):
# train the model and return train and dev scores
self.ran_model = Run(self.model, self.data, Parameters)
macro_fscores_train, macro_fscores_dev = self.ran_model.train()
return macro_fscores_train, macro_fscores_dev
def test(self):
# run the model on the test data
macro_fscore_test = self.ran_model.test()
return macro_fscore_test
def block_seeds(seed):
torch.manual_seed(int(seed))
torch.cuda.manual_seed_all(int(seed))
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
numpy.random.seed(int(seed))
random.seed(int(seed))
os.environ['PYTHONHASHSEED'] = str(seed)
if __name__ == '__main__':
params = Parameters()
print(f'--------------------- Preparing data ---------------------')
controller = Controller()
train, dev, test = [], [], []
for seed in params.seeds:
print(f'--------------- Seed: {seed} ---------------')
block_seeds(seed)
controller.initialise_model()
macro_fscores_train, macro_fscores_dev = controller.train()
macro_fscore_test = controller.test()
train.append(macro_fscores_train)
dev.append(macro_fscores_dev)
test.append(macro_fscore_test)
# numpy.matrix is deprecated; plain numpy.mean gives the per-epoch means directly
mean_train = numpy.mean(train, axis=0).tolist()
mean_dev = numpy.mean(dev, axis=0).tolist()
print(f'Macro f-score on the test set: {round(numpy.mean(test), 2)}')
| 1,999
| 26.027027
| 73
|
py
|
scientific-re
|
scientific-re-main/src/data/prepare_data.py
|
import numpy
class Preparedata:
def __init__(self, params):
self.dataset_train = params.train
self.relations_train = params.train_relations
self.dataset_dev = params.dev
self.relations_dev = params.dev_relations
self.dataset_test = params.test
self.relations_test = params.test_relations
self.train = Data()
self.dev = Data()
self.test = Data()
def prepare_data(self, param):
self.lookuptables = LookUpTables(param)
sentences, pos1, pos2, y = self.generate_sentences(param.len_sentences, self.dataset_train, self.relations_train)
self.train.set_data(sentences, pos1, pos2, y)
sentences, pos1, pos2, y = self.generate_sentences(param.len_sentences, self.dataset_dev, self.relations_dev)
self.dev.set_data(sentences, pos1, pos2, y)
sentences, pos1, pos2, y = self.generate_sentences(param.len_sentences, self.dataset_test, self.relations_test)
self.test.set_data(sentences, pos1, pos2, y)
# Generate the instances
def generate_sentences(self, len_sentences, dataset_file, relation_file):
relations = []
with open(relation_file) as r:
id_sentence = r.readline().split('\t')[0].split('.')[0]
sentence = []
r.seek(0)
for line in r:
relation_parts = line.strip().split('\t')
if relation_parts[0].split('.')[0] == id_sentence:
sentence.append((relation_parts[0], relation_parts[1], relation_parts[2]))
else:
relations.append(sentence)
sentence = [(relation_parts[0], relation_parts[1], relation_parts[2])]
id_sentence = relation_parts[0].split('.')[0]
relations.append(sentence)
first = True
sentencetot, pos1tot, pos2tot, ytot = [], [], [], []
for rel_abstract in relations:
with open(dataset_file) as d:
tokens_abstract = [
(t.strip().split('\t')[0], int(t.strip().split('\t')[1]), int(t.strip().split('\t')[2]),
t.strip().split('\t')[3], t.strip().split('\t')[4]) for t in d if
t.strip().split('\t')[0] == rel_abstract[0][0].split('.')[0]]
for rel in rel_abstract:
ent1 = [elem for elem in tokens_abstract if elem[4] == rel[0]][0]
ent2 = [elem for elem in tokens_abstract if elem[4] == rel[1]][0]
tokens_sentence = []
if ent1[1] == ent2[1]:
tokens_sentence = [elem for elem in tokens_abstract if elem[1] == ent1[1]]
if len(tokens_sentence) <= len_sentences:
sentence = ''
pos1_emb, pos2_emb = [], []
for token in tokens_sentence:
# sentence
sentence += token[3] + ' '
# distance
pos1_token = token[2] - ent1[2]
if pos1_token < -10 or pos1_token > 10:
pos1_emb.append(self.lookuptables.position2id['LONG_DISTANCE'])
else:
pos1_emb.append(self.lookuptables.position2id[pos1_token])
pos2_token = token[2] - ent2[2]
if pos2_token < -10 or pos2_token > 10:
pos2_emb.append(self.lookuptables.position2id['LONG_DISTANCE'])
else:
pos2_emb.append(self.lookuptables.position2id[pos2_token])
pos1_emb.extend([self.lookuptables.position2id['<PAD>']] * (len_sentences - len(pos1_emb)))
pos2_emb.extend([self.lookuptables.position2id['<PAD>']] * (len_sentences - len(pos2_emb)))
if first:
sentencetot = [sentence]
pos1tot = [pos1_emb]
pos2tot = [pos2_emb]
first = False
else:
sentencetot = numpy.concatenate((sentencetot, [sentence]), axis=0)
pos1tot = numpy.concatenate((pos1tot, [pos1_emb]), axis=0)
pos2tot = numpy.concatenate((pos2tot, [pos2_emb]), axis=0)
ytot.append(self.lookuptables.relation2id[rel[2]])
return sentencetot, pos1tot, pos2tot, ytot
class Data:
def __init__(self):
self.sentence = None
self.pos1 = None
self.pos2 = None
self.y = None
def set_data(self, sentence, pos1, pos2, y):
self.sentence = sentence
self.pos1 = pos1
self.pos2 = pos2
self.y = y
class LookUpTables:
def __init__(self, params):
# positions
self.position2id = {position: idtable for position, idtable in zip([*range(params.min_position, params.max_position + 2, 1)], [*range(2, params.num_positions, 1)])}
self.position2id['LONG_DISTANCE'] = 1
self.position2id['<PAD>'] = 0
self.id2position = {v: k for k, v in self.position2id.items()}
# relations
self.relation2id = {item: index for index, item in enumerate(params.relations, 0)}
self.id2relation = {v: k for k, v in self.relation2id.items()}
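# Small sketch (with a hypothetical minimal params object) of the lookup built
# above: token offsets -10..10 map to ids 2..22, with 0 reserved for <PAD> and
# 1 for LONG_DISTANCE.
def _demo_lookup_tables():
    class _Params:
        min_position = -10
        max_position = 10
        num_positions = 23
        relations = ['COMPARE', 'USAGE']
    tables = LookUpTables(_Params)
    assert tables.position2id[-10] == 2
    assert tables.position2id[10] == 22
    assert tables.position2id['<PAD>'] == 0
    assert tables.relation2id == {'COMPARE': 0, 'USAGE': 1}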
| 5,379
| 40.705426
| 172
|
py
|
scientific-re
|
scientific-re-main/src/model/CNN.py
|
import torch
from torch import nn
import torch.nn.functional as F
class CNN(torch.nn.Module):
def __init__(self, params):
super().__init__()
self.device = params.device
self.dropout = nn.Dropout(params.dropout)
self.embedding_size = params.bert_emb_size + 2 * params.position_emb_size
# Embedding layers definition
self.emb_pos = nn.Embedding(params.num_positions, params.position_emb_size, padding_idx=0)
# Convolution layers definition
self.conv_1 = nn.Conv1d(in_channels=self.embedding_size, out_channels=params.out_size, kernel_size=params.kernels[0])
self.conv_2 = nn.Conv1d(in_channels=self.embedding_size, out_channels=params.out_size, kernel_size=params.kernels[1])
self.conv_3 = nn.Conv1d(in_channels=self.embedding_size, out_channels=params.out_size, kernel_size=params.kernels[2])
# Fully connected layer definition
self.fc = nn.Linear(3 * params.out_size, len(params.relations))
def forward(self, w, p1, p2):
# Prepare the input from the embeddings layers
pos1_emb = self.emb_pos(p1)
pos2_emb = self.emb_pos(p2)
x = torch.cat((w, pos1_emb, pos2_emb), 2)
x = x.permute(0,2,1)
# Convolution layer 1 is applied
x1 = torch.relu(self.conv_1(x))
x1 = F.max_pool1d(x1, kernel_size=x1.size(2)).squeeze(2)
# Convolution layer 2 is applied
x2 = torch.relu(self.conv_2(x))
x2 = F.max_pool1d(x2, x2.size(2)).squeeze(2)
# Convolution layer 3 is applied
x3 = torch.relu(self.conv_3(x))
x3 = F.max_pool1d(x3, x3.size(2)).squeeze(2)
# Linear layer
out = torch.cat((x1, x2, x3), dim=1)
out = self.fc(self.dropout(out))
return out
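# Shape sanity-check sketch (with a hypothetical minimal params object, not the
# project's Parameters class): each branch max-pools to out_size and the three
# branches concatenate to 3 * out_size before the linear layer.
def _demo_cnn_shapes():
    class _Params:
        device = 'cpu'
        dropout = 0.5
        bert_emb_size = 768
        position_emb_size = 50
        num_positions = 23
        out_size = 15
        kernels = [2, 3, 4]
        relations = ['A', 'B', 'C']
    model = CNN(_Params)
    w = torch.randn(4, 105, 768)               # a batch of 4 BERT-embedded sentences
    p1 = torch.randint(0, 23, (4, 105))        # position ids w.r.t. entity 1
    p2 = torch.randint(0, 23, (4, 105))        # position ids w.r.t. entity 2
    assert model(w, p1, p2).shape == (4, 3)    # one logit per relation class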
| 1,788
| 33.403846
| 125
|
py
|
scientific-re
|
scientific-re-main/src/model/run.py
|
import torch
from sklearn.metrics import f1_score
from torch import optim, nn
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer, AutoModel
class DatasetMaper(Dataset):
def __init__(self, s, p1, p2, y):
self.s = s
self.p1 = p1
self.p2 = p2
self.y = y
def __len__(self):
return len(self.s)
def __getitem__(self, idx):
return self.s[idx], self.p1[idx], self.p2[idx], self.y[idx]
class Run:
def __init__(self, model, prepared_data, params):
self.data = prepared_data
# Prepare batches
train = DatasetMaper(self.data.train.sentence, self.data.train.pos1, self.data.train.pos2, self.data.train.y)
dev = DatasetMaper(self.data.dev.sentence, self.data.dev.pos1, self.data.dev.pos2, self.data.dev.y)
test = DatasetMaper(self.data.test.sentence, self.data.test.pos1, self.data.test.pos2, self.data.test.y)
self.loader_train = DataLoader(train, batch_size=params.batch_size)
self.loader_dev = DataLoader(dev, batch_size=params.batch_size)
self.loader_test = DataLoader(test, batch_size=params.batch_size)
self.device = params.device
self.len_sentences = params.len_sentences
self.relations = params.relations
self.epochs = params.epochs
self.batch_size = params.batch_size
self.model = model
self.optimizer = optim.AdamW(self.model.parameters(), lr=params.learning_rate)
self.loss = nn.CrossEntropyLoss(weight=torch.Tensor(self.compute_loss_weight()).to(self.device))
self.tokenizerBert = AutoTokenizer.from_pretrained('allenai/scibert_scivocab_uncased')
self.modelBert = AutoModel.from_pretrained('allenai/scibert_scivocab_uncased')
self.modelBert.to(self.device)
def train(self):
train_losses, dev_losses, macro_fscores_train, macro_fscores_dev = [], [], [], []
self.model.to(self.device)
for epoch in range(self.epochs):
running_loss = 0
# f-score train
tot_predictions_train, tot_targets_train = [], []
for s_batch, p1_batch, p2_batch, y_batch in self.loader_train:
# Train f-score
tot_targets_train = tot_targets_train + y_batch.tolist()
# BERT
sentences_list = [[token for token in sentence.split()] for sentence in s_batch]
tokens = self.tokenizerBert(sentences_list, return_offsets_mapping=True, is_split_into_words=True, padding='max_length', truncation=True, max_length=self.len_sentences)
encoded_sentences = []
for id_list, offset_list in zip(tokens['input_ids'], tokens['offset_mapping']):
encoded_sentence = []
for id, offset in zip(id_list, offset_list):
if offset[0] == 0 and offset[1] != 0:
encoded_sentence.append(id)
encoded_sentence.extend([0] * (self.len_sentences - len(encoded_sentence)))
encoded_sentences.append(encoded_sentence)
embedsbert = self.modelBert(torch.LongTensor(encoded_sentences).to(self.device))[0]
# Move input tensors to the device
p1_batch, p2_batch, y_batch = p1_batch.to(self.device), p2_batch.to(self.device), torch.LongTensor(y_batch).to(self.device)
# Model prediction
self.optimizer.zero_grad()
prediction = self.model(embedsbert, p1_batch, p2_batch)
# f-score train
ps = torch.exp(prediction)
top_p, top_class = ps.topk(1, dim=1)
for elem in top_class:
tot_predictions_train.append(int(elem))
# Loss and backward step
loss = self.loss(prediction, y_batch)
running_loss += loss.item()
loss.backward()
self.optimizer.step()
else:
# Update train f-score
macro_fscore_train = round(f1_score(tot_targets_train, tot_predictions_train, average="macro") * 100, 2)
macro_fscores_train.append(macro_fscore_train)
# dev
dev_loss = 0
# dev f-score
tot_predictions_dev, tot_targets_dev = [], []
with torch.no_grad():
self.model.eval()
for s_batch_dev, p1_batch_dev, p2_batch_dev, y_batch_dev in self.loader_dev:
tot_targets_dev = tot_targets_dev + y_batch_dev.tolist()
# BERT
sentences_list_dev = [[token for token in sentence.split()] for sentence in s_batch_dev]
tokens_dev = self.tokenizerBert(sentences_list_dev, return_offsets_mapping=True, is_split_into_words=True, padding='max_length', truncation=True, max_length=self.len_sentences)
encoded_sentences_dev = []
for id_list, offset_list in zip(tokens_dev['input_ids'], tokens_dev['offset_mapping']):
encoded_sentence_dev = []
for id, offset in zip(id_list, offset_list):
if offset[0] == 0 and offset[1] != 0:
encoded_sentence_dev.append(id)
encoded_sentence_dev.extend([0] * (self.len_sentences - len(encoded_sentence_dev)))
encoded_sentences_dev.append(encoded_sentence_dev)
embedsbert_dev = self.modelBert(torch.LongTensor(encoded_sentences_dev).to(self.device))[0]
# Move input tensors to the device
p1_batch_dev, p2_batch_dev,y_batch_dev = p1_batch_dev.to(self.device), p2_batch_dev.to(self.device), torch.LongTensor(y_batch_dev).to(self.device)
# Model prediction
prediction = self.model(embedsbert_dev, p1_batch_dev, p2_batch_dev)
dev_loss += self.loss(prediction, y_batch_dev)
# From the model prediction to the original class
ps = torch.exp(prediction)
top_p, top_class = ps.topk(1, dim=1)
for elem in top_class:
tot_predictions_dev.append(int(elem))
self.model.train()
# Update f-score dev
macro_fscore_dev = round(f1_score(tot_targets_dev, tot_predictions_dev, average="macro") * 100, 2)
macro_fscores_dev.append(macro_fscore_dev)
# Update train and dev loss
train_losses.append(running_loss)
dev_losses.append(dev_loss)
print("Epoch: {}/{}".format(epoch + 1, self.epochs),
"Training Loss: {:.3f}".format(train_losses[-1]),
"Dev Loss: {:.3f}".format(dev_losses[-1]),
"Macro f-score train: {}".format(macro_fscore_train),
"Macro f-score dev: {}".format(macro_fscore_dev))
return macro_fscores_train, macro_fscores_dev
def test(self):
tot_predictions_test, tot_targets_test = [], []
with torch.no_grad():
self.model.eval()
for s_batch_test, p1_batch_test, p2_batch_test, y_batch_test in self.loader_test:
tot_targets_test = tot_targets_test + y_batch_test.tolist()
# BERT
sentences_list_test = [[token for token in sentence.split()] for sentence in s_batch_test]
tokens_test = self.tokenizerBert(sentences_list_test, return_offsets_mapping=True, is_split_into_words=True, padding='max_length', truncation=True, max_length=self.len_sentences)
encoded_sentences_test = []
for id_list, offset_list in zip(tokens_test['input_ids'], tokens_test['offset_mapping']):
encoded_sentence_test = []
for id, offset in zip(id_list, offset_list):
if offset[0] == 0 and offset[1] != 0:
encoded_sentence_test.append(id)
encoded_sentence_test.extend([0] * (self.len_sentences - len(encoded_sentence_test)))
encoded_sentences_test.append(encoded_sentence_test)
embedsbert_test = self.modelBert(torch.LongTensor(encoded_sentences_test).to(self.device))[0]
p1_batch_test, p2_batch_test, y_batch_test = p1_batch_test.to(self.device), p2_batch_test.to(self.device), torch.LongTensor(y_batch_test).to(self.device)
prediction = self.model(embedsbert_test, p1_batch_test, p2_batch_test)
ps = torch.exp(prediction)
top_p, top_class = ps.topk(1, dim=1)
for elem in top_class:
tot_predictions_test.append(int(elem))
return round(f1_score(tot_targets_test, tot_predictions_test, average="macro") * 100, 2)
def compute_loss_weight(self):
relation_count = [0] * len(self.relations)
for label in self.data.train.y:
relation_count[label] += 1
tot = sum(relation_count)
relation_count = [1 / (elem / tot) if elem > 0 else 1 for elem in relation_count]
return relation_count
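# Worked sketch (numbers made up) of the inverse-frequency weighting above:
# each class weight is tot / count, and classes absent from training fall back to 1.
def _demo_loss_weights():
    relation_count = [50, 25, 25, 0]
    tot = sum(relation_count)
    weights = [1 / (c / tot) if c > 0 else 1 for c in relation_count]
    assert weights == [2.0, 4.0, 4.0, 1]       # rarer classes weigh more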
| 9,545
| 45.565854
| 200
|
py
|
scientific-re
|
scientific-re-main/src/parameters/parameters.py
|
from dataclasses import dataclass
import torch
@dataclass
class Parameters:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
seeds = [3828, 3152, 2396]
# Files
train = 'sample-data/sample-train.txt'
train_relations = 'sample-data/sample-train-rel.txt'
dev = 'sample-data/sample-dev.txt'
dev_relations = 'sample-data/sample-dev-rel.txt'
test = 'sample-data/ai-ml.txt'
test_relations = 'sample-data/ai-ml-rel.txt'
# Relations
relations = ['COMPARE','USAGE', 'MODEL-FEATURE', 'PART_WHOLE', 'RESULT']
# Data
min_position: int = -10
max_position: int = 10
# Preprocessing parameters
len_sentences: int = 105
n_pos: int = 18
# Model parameters
word_emb_fasttext_size: int = 300
position_emb_size: int = 50
bert_emb_size: int = 768
num_positions = max_position - min_position + 3 # add positions for 0, for <PAD> and for LONG DISTANCE
kernels = [2, 3, 4]
out_size: int = 15
stride: int = 1
dropout: float = 0.5
# Training parameters
epochs: int = 50
batch_size: int = 12
learning_rate: float = 0.001
| 1,140
| 25.534884
| 106
|
py
|
gaze-spirals
|
gaze-spirals-main/clock.py
|
import json
import argparse
from pupil_labs.realtime_api.simple import Device
from utils.utils_clock import blank_clock, create_clock
from utils.utils_linear import scanlines_from_pupil_device
parser = argparse.ArgumentParser()
parser.add_argument('--ip', required=True, type=str)
parser.add_argument('--port', required=False, type=str, default='8080')
args = parser.parse_args()
device = Device(address=args.ip, port=args.port)
print(f"Phone name: {device.phone_name}")
print(f"Battery level: {device.battery_level_percent}%")
print(f"Free storage: {device.memory_num_free_bytes / 1024**3:.1f} GB")
print(f"Serial number of connected glasses: {device.serial_number_glasses}")
with open('configurations/config_clock.json', 'r') as clock_file:
clock_config = json.load(clock_file)
spiral = blank_clock(clock_config)
scanlines = scanlines_from_pupil_device(device, clock_config['slitscan'])
create_clock(scanlines, spiral, clock_config)
| 956
| 37.28
| 77
|
py
|
gaze-spirals
|
gaze-spirals-main/spiral.py
|
import argparse
import json
import cv2 as cv
from utils.utils_load import VideoReader, GazeReader
from utils.utils_spiral import create_spiral, blank_spiral
from utils.utils_linear import scanlines_from_files
parser = argparse.ArgumentParser()
parser.add_argument('--gaze', required=True, type=str)
parser.add_argument('--video', required=True, type=str)
parser.add_argument('--gaze_config', required=True, type=str)
parser.add_argument('--live_preview', action='store_true')
args = parser.parse_args()
with open(args.gaze_config, 'r') as f:
gaze = GazeReader(args.gaze, f)
video = VideoReader(args.video)
with open('configurations/config_spiral.json', 'r') as clock_file:
spiral_config = json.load(clock_file)
scanlines = scanlines_from_files(video, gaze, spiral_config['slitscan'])
spiral = blank_spiral(video.videoCaptureFrameCount, spiral_config)
spiral = create_spiral(scanlines, spiral, spiral_config, live_preview=args.live_preview)
cv.imwrite('spiral.png', spiral)
| 1,039
| 36.142857
| 96
|
py
|
gaze-spirals
|
gaze-spirals-main/linear.py
|
import argparse
import json
import cv2 as cv
import numpy as np
from utils.utils_linear import scanlines_from_files
from utils.utils_load import VideoReader, GazeReader
from tqdm import tqdm
def create_blank(height, width, rgb_color=(10, 10, 10)):
image = np.zeros((height, width, 3), np.uint8)
color = tuple(reversed(rgb_color))
image[:] = color
return image
def create_linear_slitscan(video, gaze, kwargs, target_width=-1, live_preview=True):
line_width = kwargs['line_width']
line_height = kwargs['line_height']
sampling = kwargs['sampling']
spectogram_height = kwargs['SPECTOGRAM_HEIGHT']
scan_width = 2*line_width+1
full_width = video.videoCaptureFrameCount*scan_width
if target_width == -1:
target_width = full_width
else:
if target_width % scan_width != 0:
print(f'Warning: The target width of "{target_width} px" is not a multiple of the scan width "{scan_width} px"!')
target_width -= target_width % scan_width
num_lines = np.ceil(full_width/target_width).astype(int)
canvas = create_blank(num_lines*(line_height+spectogram_height), target_width)
cursor_x = cursor_y = 0
for _, scanline in tqdm(scanlines_from_files(video, gaze, kwargs), desc='create_slitscan', unit='Frame', total=video.videoCaptureFrameCount//sampling):
canvas[cursor_y: cursor_y+line_height, cursor_x: cursor_x+scan_width] = scanline
if live_preview:
downscaled = cv.resize(canvas, (1200, 300*num_lines), interpolation=cv.INTER_AREA)
cv.imshow('live preview (rescaled)', downscaled)
cv.waitKey(1)
cursor_x += scan_width
if cursor_x >= target_width:
cursor_x = 0
cursor_y += line_height
return canvas
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gaze', required=True, type=str)
parser.add_argument('--video', required=True, type=str)
parser.add_argument('--target_width', required=False, type=int, default=-1)
parser.add_argument('--gaze_config', required=True, type=str)
parser.add_argument('--live_preview', action='store_true')
args = parser.parse_args()
with open(args.gaze_config, 'r') as f:
gaze = GazeReader(args.gaze, f)
video = VideoReader(args.video)
with open('configurations/config_linear.json', 'r') as clock_file:
config_linear = json.load(clock_file)
slitscan = create_linear_slitscan(video, gaze, config_linear, target_width=args.target_width, live_preview=args.live_preview)
cv.imwrite('slitscan.png', slitscan)
| 2,694
| 38.632353
| 155
|
py
|
gaze-spirals
|
gaze-spirals-main/utils/utils_clock.py
|
import numpy as np
import cv2 as cv
def get_angle(num_sample, angle_k):
return np.radians(pow(num_sample, angle_k))
def get_radius(angle, height):
return height*angle/(2*np.pi)
def hex2bgr(hex_str):
r = int(hex_str[1:3], 16)
g = int(hex_str[3:5], 16)
b = int(hex_str[5:7], 16)
return np.array([b, g, r])
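# Quick illustrative checks (not used by the clock itself) for the helpers above:
# get_radius places one full turn (2*pi) at exactly one line_height, so each ring
# sits one scanline further out, and hex2bgr reverses '#rrggbb' into OpenCV's BGR order.
def _demo_clock_helpers():
    line_height = 100
    assert int(get_radius(2 * np.pi, line_height)) == 100
    assert int(get_radius(4 * np.pi, line_height)) == 200   # second ring
    assert hex2bgr('#ff8800').tolist() == [0, 136, 255]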
def clock_set_scanline(spiral, line, radius, angle, kwargs, shade=False):
line_height = kwargs['line_height']
line_width = kwargs['line_width']
spiralsize = spiral.shape[0]
xpos = np.cos(angle)*radius
ypos = np.sin(angle)*radius
xpos = int(xpos+spiralsize/2)
ypos = int(ypos+spiralsize/2)
rot_mat = cv.getRotationMatrix2D((line_height, line_height), 90-np.degrees(angle), 1)
dst_patch = spiral[(ypos-line_height): (ypos+line_height), (xpos-line_height): (xpos+line_height)]
src_patch = np.zeros((2*line_height, 2*line_height, 4), dtype=np.uint8)
src_patch[line_height:, line_height-line_width: (line_height)+line_width+1, :] = line
src_patch = cv.warpAffine(src_patch, rot_mat, (2*line_height, 2*line_height), flags=cv.INTER_NEAREST)
src_set = src_patch != 0
# TODO: Improve
if shade:
#dst_patch[src_set] = (dst_patch[src_set]*1.1).astype(np.uint8)
dst_patch[src_set] = src_patch[src_set]
else:
dst_patch[src_set] = src_patch[src_set]
def create_clock(scanlines, spiral, config):
offset = -1
border_width = int(config['border-width'])
border_color = hex2bgr(config['border-color'])
for timestamp, line in scanlines:
if offset == -1:
offset = timestamp
timestamp -= offset
line = cv.cvtColor(line, cv.COLOR_BGR2BGRA)
line[..., 3] = 255
line[:border_width, :, :3] = border_color
line[-border_width:, :, :3] = border_color
for ring_nr, ring_config in enumerate(config['rings']):
for t in (timestamp, ):
y = t % (config['time-unit']**(ring_nr+1))
y = 360*y/(config['time-unit']**(ring_nr+1))
y *= ring_config['radial-speed']
angle = get_angle(y, angle_k=1)
radius = get_radius(2*np.pi*(ring_nr+1), config['slitscan']['line_height'])
if t > timestamp:
blank_line = np.ones_like(line)
clock_set_scanline(spiral, blank_line, radius, angle, config['slitscan'], shade=True)
else:
clock_set_scanline(spiral, line, radius, angle, config['slitscan'])
spiral_downscaled = cv.resize(spiral, (config['window-width'], config['window-height']), interpolation=cv.INTER_CUBIC)
cv.imshow('spiral', spiral_downscaled)
cv.waitKey(max(1, config['delay-ms']))
def blank_clock(config):
line_height = config['slitscan']['line_height']
line_width = config['slitscan']['line_width']
border_width = int(config['border-width'])
border_color = hex2bgr(config['border-color'])
spiralsize = int(2*get_radius(8*np.pi, line_height))
spiral = np.zeros((spiralsize, spiralsize, 4), dtype=np.uint8)
for angle in np.arange(0, 360, 0.1):
line = np.zeros((line_height, 2*line_width+1, 4))
line[..., :3] = 20
line[..., 3] = 255
line[:border_width, :, :3] = border_color
line[-border_width:, :, :3] = border_color
angle = get_angle(angle, angle_k=1)
radius = get_radius(2*np.pi, line_height)
clock_set_scanline(spiral, line, radius, angle, config['slitscan'])
radius = get_radius(4*np.pi, line_height)
clock_set_scanline(spiral, line, radius, angle, config['slitscan'])
radius = get_radius(6*np.pi, line_height)
clock_set_scanline(spiral, line, radius, angle, config['slitscan'])
return spiral
| 3,820
| 32.814159
| 126
|
py
|
gaze-spirals
|
gaze-spirals-main/utils/utils_spiral.py
|
import cv2 as cv
import numpy as np
def get_angle(num_sample, angle_k):
return np.radians(pow(num_sample, angle_k))
def get_radius(angle, height):
return height*angle / (2*np.pi)
def spiral_set_scanline(spiral, line, num_sample, kwargs):
line_height = kwargs['slitscan']['line_height']
line_width = kwargs['slitscan']['line_width']
ANGLE_K = kwargs['ANGLE_K']
spiralsize = spiral.shape[0]
angle = get_angle(num_sample, ANGLE_K)
radius = get_radius(angle, line_height)
xpos = np.cos(angle)*radius
ypos = np.sin(angle)*radius
xpos = int(xpos+spiralsize/2)
ypos = int(ypos+spiralsize/2)
rot_mat = cv.getRotationMatrix2D((line_height, line_height), 90-np.degrees(angle), 1)
dst_patch = spiral[(ypos-line_height): (ypos+line_height), (xpos-line_height): (xpos+line_height)]
src_patch = np.zeros((2*line_height, 2*line_height, 4), dtype=np.uint8)
src_patch[line_height:, line_height-line_width: (line_height)+line_width+1, :] = line
src_patch = cv.warpAffine(src_patch, rot_mat, (2*line_height, 2*line_height), flags=cv.INTER_NEAREST)
src_set = src_patch != 0
dst_patch[src_set] = src_patch[src_set]
def create_spiral(scanlines, spiral, kwargs, live_preview=False):
num_sample = 0
for _, line in scanlines:
line = cv.cvtColor(line, cv.COLOR_RGB2RGBA)
line[..., 3] = 255
spiral_set_scanline(spiral, line, num_sample, kwargs)
num_sample += 1
if live_preview:
spiral_downscaled = cv.resize(spiral, (1200, 1200), interpolation=cv.INTER_CUBIC)
cv.imshow('live preview (scaled)', spiral_downscaled)
cv.waitKey(1)
return spiral
def blank_spiral(num_frames, kwargs):
line_height = kwargs['slitscan']['line_height']
sampling = kwargs['slitscan']['sampling']
ANGLE_K = kwargs['ANGLE_K']
steps = num_frames
max_angle = get_angle(steps//sampling, ANGLE_K) + 2 * np.pi
spiralsize = int(2*get_radius(max_angle, line_height))
return np.zeros((spiralsize, spiralsize, 4), dtype=np.uint8)
| 2,075
| 32.483871
| 105
|
py
|
gaze-spirals
|
gaze-spirals-main/utils/utils_linear.py
|
import cv2 as cv
import numpy as np
import time
def transform_frame(img, new_height):
height, width = img.shape[:2]
aspect = width / height
videoWidth = int(new_height*aspect)
videoHeight = new_height
return cv.resize(img, (videoWidth, videoHeight), interpolation=cv.INTER_CUBIC)
def transform_gaze(gaze, new_size, old_size):
pos_x, pos_y = gaze
new_width, new_height = new_size
old_width, old_height = old_size
return int(pos_x*new_width/old_width), int(pos_y*new_height/old_height)
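# Small sketch of the rescaling above: a gaze point recorded on a 1920x1080
# scene frame maps proportionally onto a downscaled frame.
def _demo_transform_gaze():
    assert transform_gaze((960, 540), new_size=(192, 108),
                          old_size=(1920, 1080)) == (96, 54)   # the center stays the center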
def extract_scanline(img, gaze, kwargs):
SLITSCAN_SOURCE = kwargs['source']
line_width = kwargs['line_width']
line_height = kwargs['line_height']
vertical_crop = kwargs['vertical_crop']
vertical_focus = kwargs['vertical_focus']
off_center = int((1-vertical_focus)*line_height)
pos_x, pos_y = gaze
old_size = img.shape[:2]
if SLITSCAN_SOURCE == 'center':
img = transform_frame(img, line_height)
pos_x, pos_y = transform_gaze((pos_x, pos_y), new_size=(img.shape[1], img.shape[0]), old_size=old_size)
center_x = int(img.shape[1]//2)
scanline = img[:,center_x-line_width:center_x+line_width+1]
return scanline
elif SLITSCAN_SOURCE == 'gaze-local':
if pos_x >= line_width and pos_x < img.shape[1]-line_width and pos_y >= vertical_crop and pos_y < img.shape[0]-vertical_crop:
center_y = int(img.shape[0] // 2)
scanline = img[center_y-vertical_crop: center_y+vertical_crop, pos_x-line_width:pos_x+line_width+1]
scanline = cv.resize(scanline, (2*line_width+1, line_height), interpolation=cv.INTER_CUBIC)
return scanline
else:
return np.zeros((line_height, 2*line_width+1, 3), np.uint8)
elif SLITSCAN_SOURCE == 'gaze-global':
img = transform_frame(img, line_height)
pos_x, pos_y = transform_gaze((pos_x, pos_y), new_size=(img.shape[1], img.shape[0]), old_size=old_size)
if pos_x >= line_width and pos_x < img.shape[1]-line_width:
scanline = img[:, pos_x-line_width:pos_x+line_width+1]
scanline[:pos_y-off_center] = scanline[:pos_y-off_center] * 0.7
scanline[pos_y+off_center:] = scanline[pos_y+off_center: ] * 0.7
return scanline
else:
return np.zeros((line_height, 2*line_width+1, 3), np.uint8)
else:
raise ValueError(f'Unknown slitscan source: {SLITSCAN_SOURCE}')
def scanlines_from_files(video, gaze, kwargs):
dt = time.time()
for num_frame in range(int(video.videoCaptureFrameCount)):
img = video.getFrame(num_frame)
pos = gaze.getGaze(num_frame)
dt += 1./video.videoCaptureFrameCount
yield int(dt), extract_scanline(img, pos, kwargs)
def scanlines_from_pupil_device(device, kwargs):
while True:
scene_sample, gaze_sample = device.receive_matched_scene_video_frame_and_gaze()
dt = scene_sample.timestamp_unix_seconds
img = scene_sample.bgr_pixels
yield dt, extract_scanline(img, (gaze_sample.x, gaze_sample.y), kwargs)
| 3,085
| 39.077922
| 133
|
py
|
gaze-spirals
|
gaze-spirals-main/utils/utils_load.py
|
import cv2 as cv
import numpy as np
import json
from lib.gaze_utils.load import parse_gaze
class VideoReader:
"""This class reads video data"""
def __init__(self, filename):
self.videoCapture = cv.VideoCapture(filename)
# get frames per second
self.videoCaptureFps = self.videoCapture.get(cv.CAP_PROP_FPS)
# get total frame count
self.videoCaptureFrameCount = int(self.videoCapture.get(cv.CAP_PROP_FRAME_COUNT))
self.videoHeight = int(self.videoCapture.get(cv.CAP_PROP_FRAME_HEIGHT))
self.videoWidth = int(self.videoCapture.get(cv.CAP_PROP_FRAME_WIDTH))
self.lastFrame=0
self.filename = filename
def getFrame(self,fnr):
"""
This function retrieves a video image by frame number
"""
if self.lastFrame != fnr-1:
self.videoCapture.set(cv.CAP_PROP_POS_FRAMES, fnr)
success,frame = self.videoCapture.read()
self.lastFrame=fnr
if not success:
frame = np.zeros((self.videoHeight, self.videoWidth, 3), np.uint8)
self.lastFrame=0
return frame
class GazeReader:
def __init__(self, gaze, config_file):
config = json.load(config_file)
gaze, special_lines = parse_gaze(gaze, config)
self.gaze = gaze
self.gaze['FRAME'] = self.gaze['FRAME'].astype(int)
self.special_lines = special_lines
def getGaze(self, fnr):
entries = self.gaze[self.gaze['FRAME'] == fnr]
if entries.shape[0] > 0:
pos_x = int(entries.iloc[0]['GAZE X'])
pos_y = int(entries.iloc[0]['GAZE Y'])
else:
pos_x = pos_y = -1
return pos_x, pos_y
| 1,704
| 31.169811
| 89
|
py
|
gaze-spirals
|
gaze-spirals-main/lib/gaze_utils/eyelink_utils.py
|
import pandas as pd
def parse_eyelink_fixations(fixation_events, separator='\t', check_consistency=True):
efix_events = [s for s in fixation_events if s.startswith('EFIX')]
if check_consistency:
sfix_events = [s for s in fixation_events if s.startswith('SFIX')]
assert len(sfix_events) == len(efix_events), "Number of SFIX events does not match number of EFIX events!"
efix_data = map(lambda s: s.split(separator)[1:], efix_events)
columns = ['EYE', 'START_TIME', 'END_TIME', 'DURATION', 'AVG_X', 'AVG_Y', 'AVG_PUPIL']
df = pd.DataFrame(data=efix_data, columns=columns)
# Convert every column except for 'EYE' to numeric
df[columns[1:]] = df[columns[1:]].apply(pd.to_numeric)
return df
def parse_eyelink_saccades(saccade_events, separator='\t', check_consistency=True):
esacc_events = [s for s in saccade_events if s.startswith('ESACC')]
if check_consistency:
ssacc_events = [s for s in saccade_events if s.startswith('SSACC')]
assert len(ssacc_events) == len(esacc_events), "Number of SSACC events does not match number of ESACC events!"
esacc_data = map(lambda s: s.split(separator)[1:], esacc_events)
columns = ['EYE', 'START_TIME', 'END_TIME', 'DURATION', 'START_X', 'START_Y', 'END_X', 'END_Y', 'AMP', 'PEAK_VEL']
df = pd.DataFrame(data=esacc_data, columns=columns)
# Convert every column except for 'EYE' to numeric
df[columns[1:]] = df[columns[1:]].apply(pd.to_numeric)
return df
def parse_eyelink_blinks(blink_events, separator='\t', check_consistency=True):
eblink_events = [s for s in blink_events if s.startswith('EBLINK')]
if check_consistency:
sblink_events = [s for s in blink_events if s.startswith('SBLINK')]
assert len(sblink_events) == len(eblink_events), "Number of SBLINK events does not match number of EBLINK events!"
eblink_data = map(lambda s: s.split(separator)[1:], eblink_events)
columns = ['EYE', 'START_TIME', 'END_TIME', 'DURATION']
df = pd.DataFrame(data=eblink_data, columns=columns)
# Convert every column except for 'EYE' to numeric
df[columns[1:]] = df[columns[1:]].apply(pd.to_numeric)
return df
| 2,176
| 47.377778
| 119
|
py
|
gaze-spirals
|
gaze-spirals-main/lib/gaze_utils/utils.py
|
def add_frame_col(gaze, fps, remove_offset=False):
if 'FRAME' in gaze:
raise ValueError('FRAME column already exists!')
if remove_offset:
offset = gaze['TIME_MS'].values[0]
else:
offset = 0
gaze['FRAME'] = gaze['TIME_MS'].apply(lambda t_ms: (t_ms-offset)*fps/1000).round()
gaze['FRAME'] = gaze['FRAME'] + 1
return gaze
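# Worked sketch of add_frame_col (pandas is assumed, matching what callers pass
# in): at 30 fps a timestamp of 1000 ms rounds to frame 30, then frames are
# shifted to start at 1.
def _demo_add_frame_col():
    import pandas as pd
    gaze = pd.DataFrame({'TIME_MS': [0, 1000, 2000]})
    gaze = add_frame_col(gaze, fps=30)
    assert gaze['FRAME'].tolist() == [1, 31, 61]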
| 367
| 32.454545
| 86
|
py
|
gaze-spirals
|
gaze-spirals-main/lib/gaze_utils/load.py
|
import pandas as pd
import re
from tqdm import tqdm
def get_columns(config):
columns = {}
for col in config['columns']:
p = col['position']
columns[p] = col['mappedTo']
columns = sorted(columns.items(), key=lambda x: x[0])
return zip(*columns)
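# Sketch of the mapping above with a hypothetical two-column config: positions
# are sorted, then unzipped into two parallel tuples.
def _demo_get_columns():
    config = {'columns': [{'position': 2, 'mappedTo': 'GAZE X'},
                          {'position': 0, 'mappedTo': 'TIME_MS'}]}
    positions, names = get_columns(config)
    assert positions == (0, 2)
    assert names == ('TIME_MS', 'GAZE X')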
def split_lines(filename, separator, mappings, use_cols):
"""
Reads a gaze file and splits its lines into two separate groups:
Main lines: The usual gaze data that is composed of several entries such as timestamp, x and y.
Special lines: This can be anything that does not conform to the format of the main data, such as special events (fixations, saccades, etc.)
For example Eyelink ascii files encode gaze events such as fixations and saccades as special lines.
"""
special_lines = {}
main_lines = []
with open(filename, 'r') as f:
for line in tqdm(f.readlines(), unit='line', desc='Preprocess lines', disable=True):
line = line.strip('\n')
if len(line) == 0:
continue
for m in mappings:
mappedTo = m['mappedTo']
if re.match(m['pattern'], line) is not None:
if mappedTo not in special_lines:
special_lines[mappedTo] = []
special_lines[mappedTo].append(line)
break
else:
entries = line.split(separator)
entries = [entries[idx] for idx in use_cols]
main_lines.append(entries)
return main_lines, special_lines
def parse_gaze(filename, config):
positions, column_names = get_columns(config)
if 'special_lines' in config:
mappings = config['special_lines']
mappings.append({'pattern': f'^{re.escape(config["comment"])}', 'mappedTo': ''})
main_lines, special_lines = split_lines(filename, use_cols=positions, separator=config['separator'], mappings=mappings)
df = pd.DataFrame(data=main_lines, columns=column_names)
else:
special_lines = []
df = pd.read_csv(filename, names=column_names, header=None, usecols=positions, sep=config['separator'], comment=config['comment'])
df = df[config['skip_header_lines']:]
for c in column_names:
if c != 'EVENT':
df[c] = df[c].astype(float)
if 'column_transform' in config:
for transform in config['column_transform']:
col = transform['column']
coeff_a = transform['linear-coeff']['a']
coeff_b = transform['linear-coeff']['b']
assert type(coeff_a) in (int, float), "Coefficient a in linear transform must be numeric!"
assert type(coeff_b) in (int, float), "Coefficient b in linear transform must be numeric!"
df[col] = df[col].apply(lambda x: (x*coeff_a+coeff_b))
return df, special_lines
if __name__=='__main__':
import argparse
import json
parser = argparse.ArgumentParser()
parser.add_argument('--gaze_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
args = parser.parse_args()
with open(args.config_path, 'r') as f:
config = json.load(f)
gaze, special_lines = parse_gaze(args.gaze_path, config)
print(gaze)
print(special_lines)
| 2,896
| 31.550562
| 141
|
py
|
gaze-spirals
|
gaze-spirals-main/lib/gaze_utils/__init__.py
| 0
| 0
| 0
|
py
|
|
gaze-spirals
|
gaze-spirals-main/lib/gaze_utils/scripts/edf2asc.py
|
import subprocess
import argparse
from glob import glob
import os
import os.path
EDF2ASC_OPTIONS = [
'-sg', # outputs sample GAZE data if present (default)
'-nv', # hide viewer commands
'-nmsg', # blocks message event output
'-t', # use only tabs as delimiters
]
def edf_to_asc(edf_file: str, out_dir: str, options: list) -> str:
"""
Calls the edf2asc command and saves it.
:param edf_file: Path to edf file.
:param out_dir: Where to place the asc file.
:returns: Path to asc file.
"""
assert edf_file[-4:] == ".edf", "expected a .edf input file"
if out_dir is None:
out_file = f'{edf_file[:-4]}.asc'
subprocess.run(["edf2asc", edf_file] + options)
else:
out_file = os.path.join(out_dir, os.path.basename(edf_file)[:-4] + '.asc')
subprocess.run(["edf2asc", edf_file, out_file] + options)
return out_file
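# Usage sketch (assumes the SR Research 'edf2asc' binary is on PATH;
# the file name is hypothetical):
# asc_path = edf_to_asc('recording.edf', out_dir=None, options=EDF2ASC_OPTIONS)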
if __name__ == '__main__':
print("WARNING: Requires 'edf2asc' to installed!")
parser = argparse.ArgumentParser()
parser.add_argument("--edf_file", type=str, default=None)
parser.add_argument("--edf_glob_pattern", type=str)
parser.add_argument("--asc_out_dir", type=str, default=None)
parser.add_argument("--options", type=str, default=EDF2ASC_OPTIONS)
args = vars(parser.parse_args())
if args['edf_file'] is not None:
print(edf_to_asc(args['edf_file'], args['asc_out_dir'], args['options']))
elif args['edf_glob_pattern'] is not None:
for path in glob(args['edf_glob_pattern']):
edf_to_asc(path, args['asc_out_dir'], args['options'])
| 1,619
| 30.764706
| 82
|
py
|
openabe
|
openabe-master/bindings/python/test.py
|
from __future__ import print_function
import pyopenabe
print("Testing Python bindings for PyOpenABE...")
openabe = pyopenabe.PyOpenABE()
cpabe = openabe.CreateABEContext("CP-ABE")
cpabe.generateParams()
cpabe.keygen("|two|three|", "alice")
pt1 = b"hello world!"
ct = cpabe.encrypt("((one or two) and three)", pt1)
print("ABE CT: ", len(ct))
pt2 = cpabe.decrypt("alice", ct)
print("PT: ", pt2)
assert pt1 == pt2, "Didn't recover the message!"
print("Testing key import")
msk = cpabe.exportSecretParams()
mpk = cpabe.exportPublicParams()
uk = cpabe.exportUserKey("alice")
cpabe2 = openabe.CreateABEContext("CP-ABE")
cpabe2.importSecretParams(msk)
cpabe2.importPublicParams(mpk)
cpabe2.importUserKey("alice", uk)
ct = cpabe2.encrypt("((one or two) and three)", pt1)
print("ABE CT: ", len(ct))
pt2 = cpabe2.decrypt("alice", ct)
print("PT: ", pt2)
assert pt1 == pt2, "Didn't recover the message!"
print("CP-ABE Success!")
pke = openabe.CreatePKEContext()
pke.keygen("user1")
ct1 = pke.encrypt("user1", pt1)
print("PKE CT: ", len(ct1))
pt2 = pke.decrypt("user1", ct1)
assert pt1 == pt2, "Didn't recover the message!"
print("PKE Success!")
pksig = openabe.CreatePKSIGContext()
pksig.keygen("user2")
sig = pksig.sign("user2", pt1)
print("PKSIG: ", len(sig))
if pksig.verify("user2", pt1, sig):
print("PKSIG Success!")
else:
print("ERROR during verify!")
kpabe = openabe.CreateABEContext("KP-ABE")
kpabe.generateParams()
kpabe.keygen("((one or three) and date < April 18, 2018)", "bob")
ct = kpabe.encrypt("|one|date=February 1, 2018|two", pt1)
print("KP-ABE CT size: ", len(ct))
pt2 = kpabe.decrypt("bob", ct)
print("PT: ", pt2)
assert pt1 == pt2, "Didn't recover the message!"
print("Testing key imports")
msk = kpabe.exportSecretParams()
mpk = kpabe.exportPublicParams()
uk = kpabe.exportUserKey("bob")
kpabe2 = openabe.CreateABEContext("KP-ABE")
kpabe2.importSecretParams(msk)
kpabe2.importPublicParams(mpk)
kpabe2.importUserKey("bob", uk)
ct = kpabe2.encrypt("|one|date=February 1, 2018|two", pt1)
print("KP-ABE CT size: ", len(ct))
pt2 = kpabe2.decrypt("bob", ct)
assert pt1 == pt2, "Didn't recover the message!"
print("KP-ABE Success!")
print("All tests passed!")
| 2,203
| 21.04
| 65
|
py
|
openabe
|
openabe-master/bindings/python/setup.py
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import os, sys
__version__ = "1.0.0"
os_platform = sys.platform
ZROOT_DIR = os.environ.get('ZROOT')
ZML_LIB = os.environ.get('ZML_LIB')
if (ZROOT_DIR is None):
sys.exit("Need to source env via '. ./env' in root directory")
print("ZROOT_DIR: ", ZROOT_DIR)
print("ZML_LIB: ", ZML_LIB)
if ZML_LIB and ZML_LIB == "with_openssl":
with_openssl = True
else:
with_openssl = False
_extra_objects = []
if "darwin" in os_platform:
_extra_objects=[ZROOT_DIR + "/root/lib/libopenabe.a", "-lpthread"]
_extra_compile_args = []
if with_openssl:
_extra_compile_args += ["-DBP_WITH_OPENSSL"]
else:
# add relic
_extra_compile_args += ["-Wno-implicit-function-declaration", "-Wno-macro-redefined"]
_extra_objects += ["-lrelic", "-lrelic_ec"]
elif "linux" in os_platform:
_extra_objects=["-lopenabe", "-lpthread"]
_extra_compile_args = []
if with_openssl:
_extra_compile_args += ["-DBP_WITH_OPENSSL"]
else:
# add relic
_extra_compile_args += ["-Wno-implicit-function-declaration", "-Wno-macro-redefined"]
_extra_objects += ["-lrelic", "-lrelic_ec"]
_extra_compile_args=["-Wall", "-Wtype-limits"]
else:
sys.exit("Your '%s' platform is currently unsupported." % os_platform)
_extra_objects += ["-lgmp", "-lssl", "-lcrypto"]
ext_modules = [Extension("pyopenabe",
["pyopenabe.pyx"],
language='c++',
extra_objects=_extra_objects,
include_dirs=[ZROOT_DIR + "/deps/root/include", ZROOT_DIR + "/root/include"],
library_dirs=[ZROOT_DIR + "/deps/root/lib", ZROOT_DIR + "/root/lib"],
extra_compile_args=["-std=c++11", "-Wno-unused-function", "-DGTEST_USE_OWN_TR1_TUPLE=1",
"-Wno-deprecated", "-pthread"] + _extra_compile_args
)]
setup(
name = 'pyopenabe',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules,
version = __version__
)
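# Typical build invocation for this Cython extension (assumes the OpenABE env
# has been sourced so ZROOT is set, as checked above):
# . ./env && python setup.py build_ext --inplace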
| 2,078
| 33.65
| 101
|
py
|
taskgrouping
|
taskgrouping-master/taskonomy_loader.py
|
import torch.utils.data as data
from PIL import Image, ImageOps
import os
import os.path
import zipfile as zf
import io
import logging
import random
import copy
import numpy as np
import time
import torch
import multiprocessing
import warnings
import torchvision.transforms as transforms
from multiprocessing import Manager
class TaskonomyLoader(data.Dataset):
def __init__(self,
root,
label_set=['depth_zbuffer','normal','segment_semantic','edge_occlusion','reshading','keypoints2d','edge_texture'],
model_whitelist=None,
model_limit=None,
output_size=None,
convert_to_tensor=True,
return_filename=False,
half_sized_output=False,
augment=False):
manager=Manager()
self.root = root
self.model_limit=model_limit
self.records=[]
if model_whitelist is None:
self.model_whitelist=None
else:
self.model_whitelist = set()
with open(model_whitelist) as f:
for line in f:
self.model_whitelist.add(line.strip())
for i,(where, subdirs, files) in enumerate(os.walk(os.path.join(root,'rgb'))):
if subdirs!=[]: continue
model = where.split('/')[-1]
if self.model_whitelist is None or model in self.model_whitelist:
full_paths = [os.path.join(where,f) for f in files]
if isinstance(model_limit, tuple):
full_paths.sort()
full_paths = full_paths[model_limit[0]:model_limit[1]]
elif model_limit is not None:
full_paths.sort()
full_paths = full_paths[:model_limit]
self.records+=full_paths
#self.records = manager.list(self.records)
self.label_set = label_set
self.output_size = output_size
self.half_sized_output=half_sized_output
self.convert_to_tensor = convert_to_tensor
self.return_filename=return_filename
self.to_tensor = transforms.ToTensor()
self.augment = augment
if augment == "aggressive":
print('Data augmentation is on (aggressive).')
elif augment:
print('Data augmentation is on (flip).')
else:
print('no data augmentation')
self.last = {}
def process_image(self,im,input=False):
output_size=self.output_size
if self.half_sized_output and not input:
if output_size is None:
output_size=(128,128)
else:
output_size=output_size[0]//2,output_size[1]//2
if output_size is not None and output_size!=im.size:
im = im.resize(output_size,Image.BILINEAR)
bands = im.getbands()
if self.convert_to_tensor:
if bands[0]=='L':
im = np.array(im)
im.setflags(write=1)
im = torch.from_numpy(im).unsqueeze(0)
else:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
im = self.to_tensor(im)
return im
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a uint8 matrix of integers with the same width and height.
If there is an error loading an image or its labels, simply return the previous example.
"""
with torch.no_grad():
file_name=self.records[index]
save_filename = file_name
flip_lr = (random.randint(0,1) > .5 and self.augment)
flip_ud = (random.randint(0,1) > .5 and (self.augment=="aggressive"))
pil_im = Image.open(file_name)
if flip_lr:
pil_im = ImageOps.mirror(pil_im)
if flip_ud:
pil_im = ImageOps.flip(pil_im)
im = self.process_image(pil_im,input=True)
error=False
ys = {}
mask = None
to_load = list(self.label_set)  # copy: the appends below must not mutate self.label_set across calls
if len(set(['edge_occlusion','normal','reshading','principal_curvature']).intersection(self.label_set))!=0:
if os.path.isfile(file_name.replace('rgb','mask')):
to_load.append('mask')
elif 'depth_zbuffer' not in to_load:
to_load.append('depth_zbuffer')
for i in to_load:
if i=='mask' and mask is not None:
continue
yfilename = file_name.replace('rgb',i)
try:
yim = Image.open(yfilename)
except:
yim = self.last[i].copy()
error = True
if (i in self.last and yim.getbands() != self.last[i].getbands()) or error:
yim = self.last[i].copy()
try:
self.last[i]=yim.copy()
except:
pass
if flip_lr:
try:
yim = ImageOps.mirror(yim)
except:
pass
if flip_ud:
try:
yim = ImageOps.flip(yim)
except:
pass
try:
yim = self.process_image(yim)
except:
yim = self.last[i].copy()
yim = self.process_image(yim)
if i == 'depth_zbuffer':
yim = yim.float()
mask = yim < (2**13)
yim-=1500.0
yim/= 1000.0
elif i == 'edge_occlusion':
yim = yim.float()
yim-=56.0248
yim/=239.1265
elif i == 'keypoints2d':
yim = yim.float()
yim-=50.0
yim/=100.0
elif i == 'edge_texture':
yim = yim.float()
yim-=718.0
yim/=1070.0
elif i == 'normal':
yim = yim.float()
yim -=.5
yim *=2.0
if flip_lr:
yim[0]*=-1.0
if flip_ud:
yim[1]*=-1.0
elif i == 'reshading':
yim=yim.mean(dim=0,keepdim=True)
yim-=.4962
yim/=0.2846
#print('reshading',yim.shape,yim.max(),yim.min())
elif i == 'principal_curvature':
yim=yim[:2]
yim-=torch.tensor([0.5175, 0.4987]).view(2,1,1)
yim/=torch.tensor([0.1373, 0.0359]).view(2,1,1)
#print('principal_curvature',yim.shape,yim.max(),yim.min())
elif i == 'mask':
mask=yim.bool()
yim=mask
ys[i] = yim
if mask is not None:
ys['mask']=mask
# print(self.label_set)
# print('rgb' in self.label_set)
if 'rgb' not in self.label_set:
ys['rgb']=im
if self.return_filename:
return im, ys, file_name
else:
return im, ys
def __len__(self):
return (len(self.records))
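# Minimal usage sketch (paths and tasks hypothetical):
# loader = TaskonomyLoader('/data/taskonomy/', label_set=['normal', 'depth_zbuffer'],
#                          output_size=(256, 256), augment=False)
# batches = torch.utils.data.DataLoader(loader, batch_size=8, shuffle=True)
# im, ys = loader[0]  # ys maps task name -> tensor, plus a 'mask' entry when available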
def show(im, ys):
from matplotlib import pyplot as plt
plt.figure(figsize=(30,30))
plt.subplot(4,3,1).set_title('RGB')
im = im.permute([1,2,0])
plt.imshow(im)
#print(im)
#print(ys)
for i, y in enumerate(ys):
yim=ys[y]
plt.subplot(4,3,2+i).set_title(y)
if y=='normal':
yim+=1
yim/=2
if yim.shape[0]==2:
yim = torch.cat([yim,torch.zeros((1,yim.shape[1],yim.shape[2]))],dim=0)
yim = yim.permute([1,2,0])
yim = yim.squeeze()
plt.imshow(np.array(yim))
plt.show()
def test():
loader = TaskonomyLoader(
'/home/tstand/Desktop/lite_taskonomy/',
label_set=['normal','reshading','principal_curvature','edge_occlusion','depth_zbuffer'],
augment='aggressive')
totals= {}
totals2 = {}
count = {}
indices= list(range(len(loader)))
random.shuffle(indices)
for data_count, index in enumerate(indices):
im, ys=loader[index]
show(im,ys)
mask = ys['mask']
#mask = ~mask
print(index)
for i, y in enumerate(ys):
yim=ys[y]
yim = yim.float()
if y not in totals:
totals[y]=0
totals2[y]=0
count[y]=0
totals[y]+=(yim*mask).sum(dim=[1,2])
totals2[y]+=((yim**2)*mask).sum(dim=[1,2])
count[y]+=(torch.ones_like(yim)*mask).sum(dim=[1,2])
#print(y,yim.shape)
std = torch.sqrt((totals2[y]-(totals[y]**2)/count[y])/count[y])
print(data_count,'/',len(loader),y,'mean:',totals[y]/count[y],'std:',std)
def output_mask(index,loader):
from matplotlib import pyplot as plt
filename=loader.records[index]
filename=filename.replace('rgb','mask')
filename=filename.replace('/intel_nvme/taskonomy_data/','/run/shm/')
if os.path.isfile(filename):
return
print(filename)
x,ys = loader[index]
mask =ys['mask']
mask=mask.squeeze()
mask_im=Image.fromarray(mask.numpy())
mask_im = mask_im.convert(mode='1')
# plt.subplot(2,1,1)
# plt.imshow(mask)
# plt.subplot(2,1,2)
# plt.imshow(mask_im)
# plt.show()
path, _ = os.path.split(filename)
os.makedirs(path,exist_ok=True)
mask_im.save(filename,bits=1,optimize=True)
def get_masks():
import multiprocessing
loader = TaskonomyLoader(
'/intel_nvme/taskonomy_data/',
label_set=['depth_zbuffer'],
augment=False)
indices= list(range(len(loader)))
random.shuffle(indices)
for count,index in enumerate(indices):
print(count,len(indices))
output_mask(index,loader)
if __name__ == "__main__":
test()
#get_masks()
| 10,600
| 29.81686
| 131
|
py
|
taskgrouping
|
taskgrouping-master/taskonomy_losses.py
|
import torch
import collections
sl=0
nl=0
nl2=0
nl3=0
dl=0
el=0
rl=0
kl=0
tl=0
al=0
cl=0
popular_offsets=collections.defaultdict(int)
batch_number=0
def segment_semantic_loss(output,target,mask):
global sl
sl = torch.nn.functional.cross_entropy(output.float(),target.long().squeeze(dim=1),ignore_index=0,reduction='mean')
return sl
def normal_loss(output,target,mask):
global nl
nl= rotate_loss(output,target,mask,normal_loss_base)
return nl
def normal_loss_simple(output,target,mask):
global nl
out = torch.nn.functional.l1_loss(output,target,reduction='none')
out *=mask.float()
nl = out.mean()
return nl
def rotate_loss(output,target,mask,loss_name):
global popular_offsets
target=target[:,:,1:-1,1:-1].float()
mask = mask[:,:,1:-1,1:-1].float()
output=output.float()
val1 = loss = loss_name(output[:,:,1:-1,1:-1],target,mask)
val2 = loss_name(output[:,:,0:-2,1:-1],target,mask)
loss = torch.min(loss,val2)
val3 = loss_name(output[:,:,1:-1,0:-2],target,mask)
loss = torch.min(loss,val3)
val4 = loss_name(output[:,:,2:,1:-1],target,mask)
loss = torch.min(loss,val4)
val5 = loss_name(output[:,:,1:-1,2:],target,mask)
loss = torch.min(loss,val5)
val6 = loss_name(output[:,:,0:-2,0:-2],target,mask)
loss = torch.min(loss,val6)
val7 = loss_name(output[:,:,2:,2:],target,mask)
loss = torch.min(loss,val7)
val8 = loss_name(output[:,:,0:-2,2:],target,mask)
loss = torch.min(loss,val8)
val9 = loss_name(output[:,:,2:,0:-2],target,mask)
loss = torch.min(loss,val9)
#lst = [val1,val2,val3,val4,val5,val6,val7,val8,val9]
#print(loss.size())
loss=loss.mean()
#print(loss)
return loss
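# rotate_loss crops the target by one pixel on every side and evaluates the base
# loss against nine one-pixel shifts of the prediction, keeping the per-sample
# minimum. This makes the loss tolerant to one-pixel misalignment between the
# predicted and ground-truth images before averaging over the batch.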
def normal_loss_base(output,target,mask):
out = torch.nn.functional.l1_loss(output,target,reduction='none')
out *=mask
out = out.mean(dim=(1,2,3))
return out
def normal2_loss(output,target,mask):
global nl3
diff = output.float() - target.float()
out = torch.abs(diff)
out = out*mask.float()
nl3 = out.mean()
return nl3
def depth_loss_simple(output,target,mask):
global dl
out = torch.nn.functional.l1_loss(output,target,reduction='none')
out *=mask.float()
dl = out.mean()
return dl
def depth_loss(output,target,mask):
global dl
dl = rotate_loss(output,target,mask,depth_loss_base)
return dl
def depth_loss_base(output,target,mask):
out = torch.nn.functional.l1_loss(output,target,reduction='none')
out *=mask.float()
out = out.mean(dim=(1,2,3))
return out
def edge_loss_simple(output,target,mask):
global el
out = torch.nn.functional.l1_loss(output,target,reduction='none')
out *=mask
el = out.mean()
return el
def reshade_loss(output,target,mask):
global rl
out = torch.nn.functional.l1_loss(output,target,reduction='none')
out *=mask
rl = out.mean()
return rl
def keypoints2d_loss(output,target,mask):
global kl
kl = torch.nn.functional.l1_loss(output,target)
return kl
def edge2d_loss(output,target,mask):
global tl
tl = torch.nn.functional.l1_loss(output,target)
return tl
def auto_loss(output,target,mask):
global al
al = torch.nn.functional.l1_loss(output,target)
return al
def pc_loss(output,target,mask):
global cl
out = torch.nn.functional.l1_loss(output,target,reduction='none')
out *=mask
cl = out.mean()
return cl
def edge_loss(output,target,mask):
global el
out = torch.nn.functional.l1_loss(output,target,reduction='none')
out *=mask
el = out.mean()
return el
def get_taskonomy_loss(losses):
def taskonomy_loss(output,target):
if 'mask' in target:
mask = target['mask']
else:
mask=None
sum_loss=None
num=0
for n,t in target.items():
if n in losses:
o = output[n].float()
this_loss = losses[n](o,t,mask)
num+=1
if sum_loss:
sum_loss = sum_loss+ this_loss
else:
sum_loss = this_loss
return sum_loss#/num # should not take average when using xception_taskonomy_new
return taskonomy_loss
def get_losses_and_tasks(args):
task_str = args.tasks
losses = {}
criteria = {}
taskonomy_tasks = []
if 's' in task_str:
losses['segment_semantic'] = segment_semantic_loss
criteria['ss_l']=lambda x,y : sl
taskonomy_tasks.append('segment_semantic')
if 'd' in task_str:
if not args.rotate_loss:
losses['depth_zbuffer'] = depth_loss_simple
else:
print('got rotate loss')
losses['depth_zbuffer'] = depth_loss
criteria['depth_l']=lambda x,y : dl
taskonomy_tasks.append('depth_zbuffer')
if 'n' in task_str:
if not args.rotate_loss:
losses['normal']=normal_loss_simple
else:
print('got rotate loss')
losses['normal']=normal_loss
criteria['norm_l']=lambda x,y : nl
#criteria['norm_l2']=lambda x,y : nl2
taskonomy_tasks.append('normal')
if 'N' in task_str:
losses['normal2']=normal2_loss
criteria['norm2']=lambda x,y : nl3
taskonomy_tasks.append('normal2')
if 'k' in task_str:
losses['keypoints2d']=keypoints2d_loss
criteria['key_l']=lambda x,y : kl
taskonomy_tasks.append('keypoints2d')
if 'e' in task_str:
if not args.rotate_loss:
losses['edge_occlusion'] = edge_loss_simple
else:
print('got rotate loss')
losses['edge_occlusion'] = edge_loss
#losses['edge_occlusion']=edge_loss
criteria['edge_l']=lambda x,y : el
taskonomy_tasks.append('edge_occlusion')
if 'r' in task_str:
losses['reshading']=reshade_loss
criteria['shade_l']=lambda x,y : rl
taskonomy_tasks.append('reshading')
if 't' in task_str:
losses['edge_texture']=edge2d_loss
criteria['edge2d_l']=lambda x,y : tl
taskonomy_tasks.append('edge_texture')
if 'a' in task_str:
losses['rgb']=auto_loss
criteria['rgb_l']=lambda x,y : al
taskonomy_tasks.append('rgb')
if 'c' in task_str:
losses['principal_curvature']=pc_loss
criteria['pc_l']=lambda x,y : cl
taskonomy_tasks.append('principal_curvature')
#"nacre"
if args.task_weights:
weights=[float(x) for x in args.task_weights.split(',')]
for l,w,c in zip(losses.items(),weights,criteria.items()):
losses[l[0]]=lambda x,y,z,l=l[1],w=w:l(x,y,z)*w
criteria[c[0]]=lambda x,y,c=c[1],w=w:c(x,y)*w
taskonomy_loss = get_taskonomy_loss(losses)
return taskonomy_loss,losses, criteria, taskonomy_tasks
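# Usage sketch (the argparse namespace is hypothetical): tasks are selected by
# single letters, e.g. 'sdn' = segmentation + depth + normals.
# from argparse import Namespace
# args = Namespace(tasks='sdn', rotate_loss=False, task_weights=None)
# total_loss, losses, criteria, task_names = get_losses_and_tasks(args)
# task_names == ['segment_semantic', 'depth_zbuffer', 'normal']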
| 6,961
| 27.650206
| 119
|
py
|
taskgrouping
|
taskgrouping-master/train_taskonomy.py
|
import argparse
import os
import shutil
import time
import platform
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.datasets as datasets
from taskonomy_losses import *
from taskonomy_loader import TaskonomyLoader
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp, optimizers
import copy
import numpy as np
import signal
import sys
import math
from collections import defaultdict
import scipy.stats
#from ptflops import get_model_complexity_info
import model_definitions as models
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch Taskonomy Training')
parser.add_argument('--data_dir', '-d', dest='data_dir',required=True,
help='path to training set')
parser.add_argument('--arch', '-a', metavar='ARCH',required=True,
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (required)')
parser.add_argument('-b', '--batch-size', default=64, type=int,
help='mini-batch size (default: 64)')
parser.add_argument('--tasks', '-ts', default='sdnkt', dest='tasks',
help='which tasks to train on')
parser.add_argument('--model_dir', default='saved_models', dest='model_dir',
help='where to save models')
parser.add_argument('--image-size', default=256, type=int,
help='size of image side (images are square)')
parser.add_argument('-j', '--workers', default=4, type=int,
help='number of data loading workers (default: 4)')
parser.add_argument('-pf', '--print_frequency', default=1, type=int,
help='how often to print output')
parser.add_argument('--epochs', default=100, type=int,
help='maximum number of epochs to run')
parser.add_argument('-mlr', '--minimum_learning_rate', default=3e-5, type=float,
metavar='LR', help='End training when learning rate falls below this value.')
parser.add_argument('-lr', '--learning-rate',dest='lr', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('-ltw0', '--loss_tracking_window_initial', default=500000, type=int,
help='initial loss tracking window (default: 500000)')
parser.add_argument('-mltw', '--maximum_loss_tracking_window', default=2000000, type=int,
help='maximum loss tracking window (default: 2000000)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '-wd','--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--resume','--restart', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
# parser.add_argument('--start-epoch', default=0, type=int,
# help='manual epoch number (useful on restarts)')
parser.add_argument('-n','--experiment_name', default='', type=str,
help='name to prepend to experiment saves.')
parser.add_argument('-v', '--validate', dest='validate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('-t', '--test', dest='test', action='store_true',
help='evaluate model on test set')
parser.add_argument('-r', '--rotate_loss', dest='rotate_loss', action='store_true',
help='should loss rotation occur')
parser.add_argument('--pretrained', dest='pretrained', default='',
help='use pre-trained model')
parser.add_argument('-vb', '--virtual-batch-multiplier', default=1, type=int,
metavar='N', help='number of forward/backward passes per parameter update')
parser.add_argument('--fp16', action='store_true',
help='Run model fp16 mode.')
parser.add_argument('-sbn', '--sync_batch_norm', action='store_true',
help='sync batch norm parameters across gpus.')
parser.add_argument('-hs', '--half_sized_output', action='store_true',
help='output 128x128 rather than 256x256.')
parser.add_argument('-na','--no_augment', action='store_true',
help='disable data augmentation.')
parser.add_argument('-ml', '--model-limit', default=None, type=int,
help='Limit the number of training instances from a single 3d building model.')
parser.add_argument('-tw', '--task-weights', default=None, type=str,
help='a comma separated list of numbers one for each task to multiply the loss by.')
cudnn.benchmark = False
def main(args):
print(args)
print('starting on', platform.node())
if 'CUDA_VISIBLE_DEVICES' in os.environ:
print('cuda gpus:',os.environ['CUDA_VISIBLE_DEVICES'])
main_stream = torch.cuda.Stream()
if args.fp16:
assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."
print('Got fp16!')
taskonomy_loss, losses, criteria, taskonomy_tasks = get_losses_and_tasks(args)
print("including the following tasks:", list(losses.keys()))
criteria2={'Loss':taskonomy_loss}
for key,value in criteria.items():
criteria2[key]=value
criteria = criteria2
print('data_dir =',args.data_dir, len(args.data_dir))
if args.no_augment:
augment = False
else:
augment = True
train_dataset = TaskonomyLoader(
args.data_dir,
label_set=taskonomy_tasks,
model_whitelist='train_models.txt',
model_limit=args.model_limit,
output_size = (args.image_size,args.image_size),
half_sized_output=args.half_sized_output,
augment=augment)
print('Found',len(train_dataset),'training instances.')
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch](tasks=losses.keys(),half_sized_output=args.half_sized_output)
def get_n_params(model):
pp=0
for p in list(model.parameters()):
#print(p.size())
nn=1
for s in list(p.size()):
nn = nn*s
pp += nn
return pp
print("Model has", get_n_params(model), "parameters")
try:
print("Encoder has", get_n_params(model.encoder), "parameters")
#flops, params=get_model_complexity_info(model.encoder,(3,256,256), as_strings=False, print_per_layer_stat=False)
#print("Encoder has", flops, "Flops and", params, "parameters,")
except:
print("Each encoder has", get_n_params(model.encoders[0]), "parameters")
for decoder in model.task_to_decoder.values():
print("Decoder has", get_n_params(decoder), "parameters")
model = model.cuda()
optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
#tested with adamW. Poor results observed
#optimizer = adamW.AdamW(model.parameters(),lr= args.lr,weight_decay=args.weight_decay,eps=1e-3)
# Initialize Amp. Amp accepts either values or strings for the optional override arguments,
# for convenient interoperation with argparse.
if args.fp16:
model, optimizer = amp.initialize(model, optimizer,
opt_level='O1',
loss_scale="dynamic",
verbosity=0
)
print('Got fp16!')
#args.lr = args.lr*float(args.batch_size*args.virtual_batch_multiplier)/256.
# optionally resume from a checkpoint
checkpoint=None
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location = lambda storage, loc: storage.cuda())
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
if args.pretrained != '':
print('loading pretrained weights for '+args.arch+' ('+args.pretrained+')')
model.encoder.load_state_dict(torch.load(args.pretrained))
if torch.cuda.device_count() >1:
model = torch.nn.DataParallel(model).cuda()
if args.sync_batch_norm:
from sync_batchnorm import patch_replication_callback
patch_replication_callback(model)
print('Virtual batch size =', args.batch_size*args.virtual_batch_multiplier)
if args.resume:
if os.path.isfile(args.resume) and 'optimizer' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True, sampler=None)
val_loader = get_eval_loader(args.data_dir, taskonomy_tasks, args)
trainer=Trainer(train_loader,val_loader,model,optimizer,criteria,args,checkpoint)
if args.validate:
trainer.progress_table=[]
trainer.validate([{}])
print()
return
if args.test:
trainer.progress_table=[]
# replace val loader with a loader that loads test data
trainer.val_loader=get_eval_loader(args.data_dir, taskonomy_tasks, args,model_limit=(1000,2000))
trainer.validate([{}])
return
trainer.train()
def get_eval_loader(datadir, label_set, args,model_limit=1000):
print(datadir)
val_dataset = TaskonomyLoader(datadir,
label_set=label_set,
model_whitelist='val_models.txt',
model_limit=model_limit,
output_size = (args.image_size,args.image_size),
half_sized_output=args.half_sized_output,
augment=False)
print('Found',len(val_dataset),'validation instances.')
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=max(args.batch_size//2,1), shuffle=False,
num_workers=args.workers, pin_memory=True,sampler=None)
return val_loader
program_start_time = time.time()
def on_keyboard_interrupt(x,y):
#print()
sys.exit(1)
signal.signal(signal.SIGINT, on_keyboard_interrupt)
def get_average_learning_rate(optimizer):
try:
return optimizer.learning_rate
except:
s = 0
for param_group in optimizer.param_groups:
s+=param_group['lr']
return s/len(optimizer.param_groups)
class data_prefetcher():
def __init__(self, loader):
self.initial_loader = loader
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
try:
self.next_input, self.next_target = next(self.loader)
except StopIteration:
# self.next_input = None
# self.next_target = None
self.loader = iter(self.initial_loader)
self.preload()
return
with torch.cuda.stream(self.stream):
self.next_input = self.next_input.cuda(non_blocking=True)
#self.next_target = self.next_target.cuda(async=True)
self.next_target = {key: val.cuda(non_blocking=True) for (key,val) in self.next_target.items()}
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
target = self.next_target
self.preload()
return input, target
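# Prefetcher usage sketch: overlaps host-to-device copies with compute by
# staging the next batch on a side CUDA stream; the loader restarts
# automatically on StopIteration, so callers bound iteration by length:
# prefetcher = data_prefetcher(train_loader)
# for _ in range(len(train_loader)):
#     input, target = prefetcher.next()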
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def print_table(table_list, go_back=True):
if len(table_list)==0:
print()
print()
return
if go_back:
print("\033[F",end='')
print("\033[K",end='')
for i in range(len(table_list)):
print("\033[F",end='')
print("\033[K",end='')
lens = defaultdict(int)
for i in table_list:
for ii,to_print in enumerate(i):
for title,val in to_print.items():
lens[(title,ii)]=max(lens[(title,ii)],max(len(title),len(val)))
# printed_table_list_header = []
for ii,to_print in enumerate(table_list[0]):
for title,val in to_print.items():
print('{0:^{1}}'.format(title,lens[(title,ii)]),end=" ")
for i in table_list:
print()
for ii,to_print in enumerate(i):
for title,val in to_print.items():
print('{0:^{1}}'.format(val,lens[(title,ii)]),end=" ",flush=True)
print()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.std= 0
self.sum = 0
self.sumsq = 0
self.count = 0
self.lst = []
def update(self, val, n=1):
self.val = float(val)
self.sum += float(val) * n
#self.sumsq += float(val)**2
self.count += n
self.avg = self.sum / self.count
self.lst.append(self.val)
self.std=np.std(self.lst)
class Trainer:
def __init__(self,train_loader,val_loader,model,optimizer,criteria,args,checkpoint=None):
self.train_loader=train_loader
self.val_loader=val_loader
self.train_prefetcher=data_prefetcher(self.train_loader)
self.model=model
self.optimizer=optimizer
self.criteria=criteria
self.args = args
self.fp16=args.fp16
self.code_archive=self.get_code_archive()
if checkpoint:
if 'progress_table' in checkpoint:
self.progress_table = checkpoint['progress_table']
else:
self.progress_table=[]
if 'epoch' in checkpoint:
self.start_epoch = checkpoint['epoch']+1
else:
self.start_epoch = 0
if 'best_loss' in checkpoint:
self.best_loss = checkpoint['best_loss']
else:
self.best_loss = 9e9
if 'stats' in checkpoint:
self.stats = checkpoint['stats']
else:
self.stats=[]
if 'loss_history' in checkpoint:
self.loss_history = checkpoint['loss_history']
else:
self.loss_history=[]
else:
self.progress_table=[]
self.best_loss = 9e9
self.stats = []
self.start_epoch = 0
self.loss_history=[]
self.lr0 = get_average_learning_rate(optimizer)
print_table(self.progress_table,False)
self.ticks=0
self.last_tick=0
self.loss_tracking_window = args.loss_tracking_window_initial
def get_code_archive(self):
file_contents={}
for i in os.listdir('.'):
if i[-3:]=='.py':
with open(i,'r') as file:
file_contents[i]=file.read()
return file_contents
def train(self):
for self.epoch in range(self.start_epoch,self.args.epochs):
current_learning_rate = get_average_learning_rate(self.optimizer)
if current_learning_rate < self.args.minimum_learning_rate:
break
# train for one epoch
train_string, train_stats = self.train_epoch()
# evaluate on validation set
progress_string=train_string
loss, progress_string, val_stats = self.validate(progress_string)
print()
self.progress_table.append(progress_string)
self.stats.append((train_stats,val_stats))
self.checkpoint(loss)
def checkpoint(self, loss):
is_best = loss < self.best_loss
self.best_loss = min(loss, self.best_loss)
save_filename = self.args.experiment_name+'_'+self.args.arch+'_'+('p' if self.args.pretrained != '' else 'np')+'_'+self.args.tasks+'_checkpoint.pth.tar'
try:
to_save = self.model
if torch.cuda.device_count() >1:
to_save=to_save.module
gpus='all'
if 'CUDA_VISIBLE_DEVICES' in os.environ:
gpus=os.environ['CUDA_VISIBLE_DEVICES']
self.save_checkpoint({
'epoch': self.epoch,
'info':{'machine':platform.node(), 'GPUS':gpus},
'args': self.args,
'arch': self.args.arch,
'state_dict': to_save.state_dict(),
'best_loss': self.best_loss,
'optimizer' : self.optimizer.state_dict(),
'progress_table' : self.progress_table,
'stats': self.stats,
'loss_history': self.loss_history,
'code_archive':self.code_archive
}, False, self.args.model_dir, save_filename)
if is_best:
self.save_checkpoint(None, True,self.args.model_dir, save_filename)
except:
print('save checkpoint failed...')
def save_checkpoint(self,state, is_best,directory='', filename='checkpoint.pth.tar'):
path = os.path.join(directory,filename)
if is_best:
best_path = os.path.join(directory,'best_'+filename)
shutil.copyfile(path, best_path)
else:
torch.save(state, path)
def learning_rate_schedule(self):
ttest_p=0
z_diff=0
#don't reduce learning rate until the second epoch has ended
if self.epoch < 2:
return 0,0
wind = self.loss_tracking_window//(self.args.batch_size*self.args.virtual_batch_multiplier)
if len(self.loss_history)-self.last_tick > wind:
a = self.loss_history[-wind:-wind*5//8]
b = self.loss_history[-wind*3//8:]
#remove outliers
a = sorted(a)
b = sorted(b)
a = a[int(len(a)*.05):int(len(a)*.95)]
b = b[int(len(b)*.05):int(len(b)*.95)]
length_=min(len(a),len(b))
a=a[:length_]
b=b[:length_]
z_diff,ttest_p = scipy.stats.ttest_rel(a,b,nan_policy='omit')
if z_diff < 0 or ttest_p > .99:
self.ticks+=1
self.last_tick=len(self.loss_history)
self.adjust_learning_rate()
self.loss_tracking_window = min(self.args.maximum_loss_tracking_window,self.loss_tracking_window*2)
return ttest_p, z_diff
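# Plateau detection sketch: the window of recent losses is split into an older
# slice (a) and a newer slice (b), outliers are trimmed, and a paired t-test
# (scipy.stats.ttest_rel) compares them. If the newer losses are not
# significantly lower (statistic < 0 or p > .99), the learning rate is halved
# and the tracking window is doubled (capped by maximum_loss_tracking_window).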
def train_epoch(self):
global program_start_time
average_meters = defaultdict(AverageMeter)
display_values = []
for name,func in self.criteria.items():
display_values.append(name)
# switch to train mode
self.model.train()
end = time.time()
epoch_start_time = time.time()
epoch_start_time2=time.time()
batch_num = 0
num_data_points=len(self.train_loader)//self.args.virtual_batch_multiplier
if num_data_points > 10000:
num_data_points = num_data_points//5
starting_learning_rate=get_average_learning_rate(self.optimizer)
while True:
if batch_num ==0:
end=time.time()
epoch_start_time2=time.time()
if num_data_points==batch_num:
break
self.percent = batch_num/num_data_points
loss_dict=None
loss=0
# accumulate gradients over multiple runs of input
for _ in range(self.args.virtual_batch_multiplier):
data_start = time.time()
input, target = self.train_prefetcher.next()
average_meters['data_time'].update(time.time() - data_start)
loss_dict2,loss2 = self.train_batch(input,target)
loss+=loss2
if loss_dict is None:
loss_dict=loss_dict2
else:
for key,value in loss_dict2.items():
loss_dict[key]+=value
# divide by the number of accumulations
loss/=self.args.virtual_batch_multiplier
for key,value in loss_dict.items():
loss_dict[key]=value/self.args.virtual_batch_multiplier
# do the weight updates and set gradients back to zero
self.update()
self.loss_history.append(float(loss))
ttest_p, z_diff = self.learning_rate_schedule()
for name,value in loss_dict.items():
try:
average_meters[name].update(value.data)
except:
average_meters[name].update(value)
elapsed_time_for_epoch = (time.time()-epoch_start_time2)
eta = (elapsed_time_for_epoch/(batch_num+.2))*(num_data_points-batch_num)
if eta >= 24*3600:
eta = 24*3600-1
batch_num+=1
current_learning_rate= get_average_learning_rate(self.optimizer)
if True:
to_print = {}
to_print['ep']= ('{0}:').format(self.epoch)
to_print['#/{0}'.format(num_data_points)]= ('{0}').format(batch_num)
to_print['lr']= ('{0:0.3g}-{1:0.3g}').format(starting_learning_rate,current_learning_rate)
to_print['eta']= ('{0}').format(time.strftime("%H:%M:%S", time.gmtime(int(eta))))
to_print['d%']=('{0:0.2g}').format(100*average_meters['data_time'].sum/elapsed_time_for_epoch)
for name in display_values:
meter = average_meters[name]
to_print[name]= ('{meter.avg:.4g}').format(meter=meter)
if batch_num < num_data_points-1:
to_print['ETA']= ('{0}').format(time.strftime("%H:%M:%S", time.gmtime(int(eta+elapsed_time_for_epoch))))
to_print['ttest']= ('{0:0.3g},{1:0.3g}').format(z_diff,ttest_p)
if batch_num % self.args.print_frequency == 0:
print_table(self.progress_table+[[to_print]])
epoch_time = time.time()-epoch_start_time
stats={'batches':num_data_points,
'learning_rate':current_learning_rate,
'Epoch time':epoch_time,
}
for name in display_values:
meter = average_meters[name]
stats[name] = meter.avg
data_time = average_meters['data_time'].sum
to_print['eta']= ('{0}').format(time.strftime("%H:%M:%S", time.gmtime(int(epoch_time))))
return [to_print], stats
def train_batch(self, input, target):
loss_dict = {}
input = input.float()
output = self.model(input)
first_loss=None
for c_name,criterion_fun in self.criteria.items():
if first_loss is None:first_loss=c_name
loss_dict[c_name]=criterion_fun(output, target)
loss = loss_dict[first_loss].clone()
loss = loss / self.args.virtual_batch_multiplier
if self.args.fp16:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
return loss_dict, loss
def update(self):
self.optimizer.step()
self.optimizer.zero_grad()
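# Together, train_batch and update implement gradient accumulation: each of the
# virtual_batch_multiplier sub-batches calls backward() on loss divided by the
# multiplier (so the accumulated gradients equal the full-batch average), and
# only then does update() take one optimizer step and clear the gradients.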
def validate(self, train_table):
average_meters = defaultdict(AverageMeter)
self.model.eval()
epoch_start_time = time.time()
batch_num=0
num_data_points=len(self.val_loader)
prefetcher = data_prefetcher(self.val_loader)
torch.cuda.empty_cache()
with torch.no_grad():
for i in range(len(self.val_loader)):
input, target = prefetcher.next()
if batch_num ==0:
epoch_start_time2=time.time()
output = self.model(input)
loss_dict = {}
for c_name,criterion_fun in self.criteria.items():
loss_dict[c_name]=criterion_fun(output, target)
batch_num=i+1
for name,value in loss_dict.items():
try:
average_meters[name].update(value.data)
except:
average_meters[name].update(value)
eta = ((time.time()-epoch_start_time2)/(batch_num+.2))*(len(self.val_loader)-batch_num)
to_print = {}
to_print['#/{0}'.format(num_data_points)]= ('{0}').format(batch_num)
to_print['eta']= ('{0}').format(time.strftime("%H:%M:%S", time.gmtime(int(eta))))
for name in self.criteria.keys():
meter = average_meters[name]
to_print[name]= ('{meter.avg:.4g}').format(meter=meter)
progress=train_table+[to_print]
if batch_num % self.args.print_frequency == 0:
print_table(self.progress_table+[progress])
epoch_time = time.time()-epoch_start_time
stats={'batches':len(self.val_loader),
'Epoch time':epoch_time,
}
ultimate_loss = None
for name in self.criteria.keys():
meter = average_meters[name]
stats[name]=meter.avg
ultimate_loss = stats['Loss']
to_print['eta']= ('{0}').format(time.strftime("%H:%M:%S", time.gmtime(int(epoch_time))))
torch.cuda.empty_cache()
return float(ultimate_loss), progress , stats
def adjust_learning_rate(self):
self.lr = self.lr0 * (0.50 ** (self.ticks))
self.set_learning_rate(self.lr)
def set_learning_rate(self,lr):
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
if __name__ == '__main__':
#mp.set_start_method('forkserver')
args = parser.parse_args()
main(args)
| 26,764
| 35.917241
| 160
|
py
|
taskgrouping
|
taskgrouping-master/read_training_history.py
|
import argparse
import os
import torch
from collections import defaultdict
from train_taskonomy import print_table
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--model_file', '-m', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--arch', '-a', metavar='ARCH', default='',
help='model architecture: ' +
' (default: resnet18)')
parser.add_argument('--save_raw',default='')
parser.add_argument('--show_loss_plot','-s', action='store_true',
help='show loss plot')
args = parser.parse_args()
def create_model():
import mymodels as models
try:
model = models.__dict__[args.arch](num_classification_classes=1000,
num_segmentation_classes=21,
num_segmentation_classes2=90,
normalize=False)
except:
model = models.__dict__[args.arch]()
return model
if args.model_file:
if os.path.isfile(args.model_file):
print("=> loading checkpoint '{}'".format(args.model_file))
checkpoint = torch.load(args.model_file)
progress_table = checkpoint['progress_table']
print_table(progress_table,False)
if args.show_loss_plot:
loss_history = checkpoint['loss_history']
print(len(loss_history))
print()
import matplotlib.pyplot as plt
loss_history2 = loss_history[200:]  # drop the noisy warm-up iterations
loss_history3 = []
cur = loss_history2[0]
for i in loss_history2:
cur = .99*cur + i*.01  # exponential moving average, smoothing factor 0.01
loss_history3.append(cur)
plt.plot(range(len(loss_history3)), loss_history3)
plt.show()
| 2,820
| 30.696629
| 83
|
py
|
taskgrouping
|
taskgrouping-master/sync_batchnorm/replicate.py
|
# -*- coding: utf-8 -*-
# File : replicate.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import functools
from torch.nn.parallel.data_parallel import DataParallel
__all__ = [
'CallbackContext',
'execute_replication_callbacks',
'DataParallelWithCallback',
'patch_replication_callback'
]
class CallbackContext(object):
pass
def execute_replication_callbacks(modules):
"""
Execute a replication callback `__data_parallel_replicate__` on each module created by the original replication.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
Note that, as all modules are isomorphic, we assign each sub-module a context
(shared among multiple copies of this module on different devices).
Through this context, different copies can share some information.
We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback
of any slave copies.
"""
master_copy = modules[0]
nr_modules = len(list(master_copy.modules()))
ctxs = [CallbackContext() for _ in range(nr_modules)]
for i, module in enumerate(modules):
for j, m in enumerate(module.modules()):
if hasattr(m, '__data_parallel_replicate__'):
m.__data_parallel_replicate__(ctxs[j], i)
class DataParallelWithCallback(DataParallel):
"""
Data Parallel with a replication callback.
A replication callback `__data_parallel_replicate__` of each module will be invoked after it is created by
the original `replicate` function.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
# sync_bn.__data_parallel_replicate__ will be invoked.
"""
def replicate(self, module, device_ids):
modules = super(DataParallelWithCallback, self).replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
def patch_replication_callback(data_parallel):
"""
Monkey-patch an existing `DataParallel` object. Add the replication callback.
Useful when you have a customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replication_callback(sync_bn)
# this is equivalent to
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
"""
assert isinstance(data_parallel, DataParallel)
old_replicate = data_parallel.replicate
@functools.wraps(old_replicate)
def new_replicate(module, device_ids):
modules = old_replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
data_parallel.replicate = new_replicate
| 3,226
| 32.968421
| 115
|
py
|
taskgrouping
|
taskgrouping-master/sync_batchnorm/unittest.py
|
# -*- coding: utf-8 -*-
# File : unittest.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import unittest
import torch
class TorchTestCase(unittest.TestCase):
def assertTensorClose(self, x, y):
adiff = float((x - y).abs().max())
if (y == 0).all():
rdiff = 'NaN'
else:
rdiff = float((adiff / y).abs().max())
message = (
'Tensor close check failed\n'
'adiff={}\n'
'rdiff={}\n'
).format(adiff, rdiff)
self.assertTrue(torch.allclose(x, y), message)
| 746
| 23.9
| 59
|
py
|
taskgrouping
|
taskgrouping-master/sync_batchnorm/batchnorm.py
|
# -*- coding: utf-8 -*-
# File : batchnorm.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import collections
import contextlib
import torch
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
try:
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
except ImportError:
ReduceAddCoalesced = Broadcast = None
try:
from jactorch.parallel.comm import SyncMaster
from jactorch.parallel.data_parallel import JacDataParallel as DataParallelWithCallback
except ImportError:
from .comm import SyncMaster
from .replicate import DataParallelWithCallback
__all__ = [
'SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d',
'patch_sync_batchnorm', 'convert_model'
]
def _sum_ft(tensor):
"""sum over the first and last dimention"""
return tensor.sum(dim=0).sum(dim=-1)
def _unsqueeze_ft(tensor):
"""add new dimensions at the front and the tail"""
return tensor.unsqueeze(0).unsqueeze(-1)
_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])
class _SynchronizedBatchNorm(_BatchNorm):
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True):
assert ReduceAddCoalesced is not None, 'Can not use Synchronized Batch Normalization without CUDA support.'
super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
self._sync_master = SyncMaster(self._data_parallel_master)
self._is_parallel = False
self._parallel_id = None
self._slave_pipe = None
def forward(self, input):
# If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
if not (self._is_parallel and self.training):
return F.batch_norm(
input, self.running_mean, self.running_var, self.weight, self.bias,
self.training, self.momentum, self.eps)
# Resize the input to (B, C, -1).
input_shape = input.size()
input = input.view(input.size(0), self.num_features, -1)
# Compute the sum and square-sum.
sum_size = input.size(0) * input.size(2)
input_sum = _sum_ft(input)
input_ssum = _sum_ft(input ** 2)
# Reduce-and-broadcast the statistics.
if self._parallel_id == 0:
mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
else:
mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
# Compute the output.
if self.affine:
# MJY:: Fuse the multiplication for speed.
output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
else:
output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)
# Reshape it.
return output.view(input_shape)
def __data_parallel_replicate__(self, ctx, copy_id):
self._is_parallel = True
self._parallel_id = copy_id
# parallel_id == 0 means master device.
if self._parallel_id == 0:
ctx.sync_master = self._sync_master
else:
self._slave_pipe = ctx.sync_master.register_slave(copy_id)
def _data_parallel_master(self, intermediates):
"""Reduce the sum and square-sum, compute the statistics, and broadcast it."""
# Always using same "device order" makes the ReduceAdd operation faster.
# Thanks to:: Tete Xiao (http://tetexiao.com/)
intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
to_reduce = [i[1][:2] for i in intermediates]
to_reduce = [j for i in to_reduce for j in i] # flatten
target_gpus = [i[1].sum.get_device() for i in intermediates]
sum_size = sum([i[1].sum_size for i in intermediates])
sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
outputs = []
for i, rec in enumerate(intermediates):
outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2])))
return outputs
def _compute_mean_std(self, sum_, ssum, size):
"""Compute the mean and standard-deviation with sum and square-sum. This method
also maintains the moving average on the master device."""
assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
mean = sum_ / size
sumvar = ssum - sum_ * mean
unbias_var = sumvar / (size - 1)
bias_var = sumvar / size
if hasattr(torch, 'no_grad'):
with torch.no_grad():
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
else:
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
return mean, bias_var.clamp(self.eps) ** -0.5
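# Sketch of the identity used above: with s1 = sum(x) and s2 = sum(x^2) over
# `size` elements, mean = s1/size and sumvar = s2 - s1*mean equals
# sum((x - mean)^2); dividing by size-1 (unbiased) feeds the running stats,
# while the biased divisor `size` feeds the returned inverse std.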
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
r"""Applies Synchronized Batch Normalization over a 2d or 3d input that is seen as a
mini-batch.
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm1d as the mean and
standard-deviation are reduced across all devices during training.
For example, when one uses `nn.DataParallel` to wrap the network during
training, PyTorch's implementation normalizes the tensor on each device using
the statistics only on that device, which accelerates the computation and
is easy to implement, but the statistics might be inaccurate.
Instead, in this synchronized version, the statistics will be computed
over all training samples distributed on multiple devices.
Note that, for the one-GPU or CPU-only case, this module behaves exactly the same
as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm
Args:
num_features: num_features from an expected input of size
`batch_size x num_features [x width]`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape::
- Input: :math:`(N, C)` or :math:`(N, C, L)`
- Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm1d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm1d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError('expected 2D or 3D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm1d, self)._check_input_dim(input)
class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch
of 3d inputs
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm2d as the mean and
standard-deviation are reduced across all devices during training.
For example, when one uses `nn.DataParallel` to wrap the network during
training, PyTorch's implementation normalizes the tensor on each device using
the statistics only on that device, which accelerates the computation and
is easy to implement, but the statistics might be inaccurate.
Instead, in this synchronized version, the statistics will be computed
over all training samples distributed on multiple devices.
Note that, for the one-GPU or CPU-only case, this module behaves exactly the same
as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, H, W)` slices, it's common terminology to call this Spatial BatchNorm
Args:
num_features: num_features from an expected input of
size batch_size x num_features x height x width
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape::
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm2d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm2d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm2d, self)._check_input_dim(input)
class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch
of 4d inputs
.. math::
y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
This module differs from the built-in PyTorch BatchNorm3d as the mean and
standard-deviation are reduced across all devices during training.
For example, when one uses `nn.DataParallel` to wrap the network during
training, PyTorch's implementation normalizes the tensor on each device using
the statistics only on that device, which accelerates the computation and
is easy to implement, but the statistics might be inaccurate.
Instead, in this synchronized version, the statistics will be computed
over all training samples distributed on multiple devices.
Note that, for the one-GPU or CPU-only case, this module behaves exactly the same
as the built-in PyTorch implementation.
The mean and standard-deviation are calculated per-dimension over
the mini-batches and gamma and beta are learnable parameter vectors
of size C (where C is the input size).
During training, this layer keeps a running estimate of its computed mean
and variance. The running sum is kept with a default momentum of 0.1.
During evaluation, this running mean/variance is used for normalization.
Because the BatchNorm is done over the `C` dimension, computing statistics
on `(N, D, H, W)` slices, it's common terminology to call this Volumetric BatchNorm
or Spatio-temporal BatchNorm
Args:
num_features: num_features from an expected input of
size batch_size x num_features x depth x height x width
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Default: 0.1
affine: a boolean value that when set to ``True``, gives the layer learnable
affine parameters. Default: ``True``
Shape::
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
Examples:
>>> # With Learnable Parameters
>>> m = SynchronizedBatchNorm3d(100)
>>> # Without Learnable Parameters
>>> m = SynchronizedBatchNorm3d(100, affine=False)
>>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10))
>>> output = m(input)
"""
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError('expected 5D input (got {}D input)'
.format(input.dim()))
super(SynchronizedBatchNorm3d, self)._check_input_dim(input)
@contextlib.contextmanager
def patch_sync_batchnorm():
    import torch.nn as nn

    backup = nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d

    nn.BatchNorm1d = SynchronizedBatchNorm1d
    nn.BatchNorm2d = SynchronizedBatchNorm2d
    nn.BatchNorm3d = SynchronizedBatchNorm3d

    try:
        yield
    finally:
        # Restore the original classes even if the caller's block raises.
        nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d = backup
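

# Minimal usage sketch for the context manager above: while the `with` block
# is active, nn.BatchNorm2d resolves to SynchronizedBatchNorm2d.
def _patch_sync_batchnorm_example():
    import torch.nn as nn

    with patch_sync_batchnorm():
        model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16))
    assert isinstance(model[1], SynchronizedBatchNorm2d)
    return model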
def convert_model(module):
"""Traverse the input module and its child recursively
and replace all instance of torch.nn.modules.batchnorm.BatchNorm*N*d
to SynchronizedBatchNorm*N*d
Args:
module: the input module needs to be convert to SyncBN model
Examples:
>>> import torch.nn as nn
>>> import torchvision
>>> # m is a standard pytorch model
>>> m = torchvision.models.resnet18(True)
>>> m = nn.DataParallel(m)
>>> # after convert, m is using SyncBN
>>> m = convert_model(m)
"""
if isinstance(module, torch.nn.DataParallel):
mod = module.module
mod = convert_model(mod)
mod = DataParallelWithCallback(mod)
return mod
mod = module
for pth_module, sync_module in zip([torch.nn.modules.batchnorm.BatchNorm1d,
torch.nn.modules.batchnorm.BatchNorm2d,
torch.nn.modules.batchnorm.BatchNorm3d],
[SynchronizedBatchNorm1d,
SynchronizedBatchNorm2d,
SynchronizedBatchNorm3d]):
if isinstance(module, pth_module):
mod = sync_module(module.num_features, module.eps, module.momentum, module.affine)
mod.running_mean = module.running_mean
mod.running_var = module.running_var
if module.affine:
mod.weight.data = module.weight.data.clone().detach()
mod.bias.data = module.bias.data.clone().detach()
for name, child in module.named_children():
mod.add_module(name, convert_model(child))
return mod
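

if __name__ == '__main__':
    # Minimal CPU sketch (no DataParallel, no pretrained weights): convert a
    # plain module tree and check that the BatchNorm layer was swapped.
    import torch.nn as nn

    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    net = convert_model(net)
    print(type(net[1]).__name__)  # SynchronizedBatchNorm2d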
| 15,829
| 39.075949
| 116
|
py
|
taskgrouping
|
taskgrouping-master/sync_batchnorm/batchnorm_reimpl.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : batchnorm_reimpl.py
# Author : acgtyrant
# Date : 11/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import torch
import torch.nn as nn
import torch.nn.init as init
__all__ = ['BatchNorm2dReimpl']
class BatchNorm2dReimpl(nn.Module):
"""
A re-implementation of batch normalization, used for testing the numerical
stability.
Author: acgtyrant
See also:
https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues/14
"""
def __init__(self, num_features, eps=1e-5, momentum=0.1):
super().__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.weight = nn.Parameter(torch.empty(num_features))
self.bias = nn.Parameter(torch.empty(num_features))
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
self.reset_parameters()
def reset_running_stats(self):
self.running_mean.zero_()
self.running_var.fill_(1)
def reset_parameters(self):
self.reset_running_stats()
init.uniform_(self.weight)
init.zeros_(self.bias)
    def forward(self, input_):
        batchsize, channels, height, width = input_.size()
        numel = batchsize * height * width
        # Flatten to (C, N*H*W) so per-channel statistics become a single reduction.
        input_ = input_.permute(1, 0, 2, 3).contiguous().view(channels, numel)
        sum_ = input_.sum(1)
        sum_of_square = input_.pow(2).sum(1)
        mean = sum_ / numel
        # Sum of squared deviations: numel * Var[x], via E[x^2] - E[x]^2.
        sumvar = sum_of_square - sum_ * mean

        self.running_mean = (
            (1 - self.momentum) * self.running_mean
            + self.momentum * mean.detach()
        )
        # The running variance tracks the unbiased estimate (divide by n - 1)...
        unbias_var = sumvar / (numel - 1)
        self.running_var = (
            (1 - self.momentum) * self.running_var
            + self.momentum * unbias_var.detach()
        )
        # ...while the current batch is normalized with the biased one (divide
        # by n), matching torch.nn.BatchNorm2d in training mode.
        bias_var = sumvar / numel
        inv_std = 1 / (bias_var + self.eps).pow(0.5)
output = (
(input_ - mean.unsqueeze(1)) * inv_std.unsqueeze(1) *
self.weight.unsqueeze(1) + self.bias.unsqueeze(1))
return output.view(channels, batchsize, height, width).permute(1, 0, 2, 3).contiguous()
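

if __name__ == '__main__':
    # Numerical sanity check (a sketch; see the issue linked above for the
    # original context): in training mode this re-implementation should match
    # torch.nn.BatchNorm2d, which also normalizes with the biased variance.
    torch.manual_seed(0)
    ref = nn.BatchNorm2d(8)
    reimpl = BatchNorm2dReimpl(8)
    reimpl.weight.data.copy_(ref.weight.data)
    reimpl.bias.data.copy_(ref.bias.data)

    x = torch.randn(4, 8, 16, 16)
    print(torch.allclose(ref(x), reimpl(x), atol=1e-5))  # True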
| 2,385
| 30.813333
| 95
|
py
|
taskgrouping
|
taskgrouping-master/sync_batchnorm/comm.py
|
# -*- coding: utf-8 -*-
# File : comm.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import queue
import collections
import threading
__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster']
class FutureResult(object):
"""A thread-safe future implementation. Used only as one-to-one pipe."""
def __init__(self):
self._result = None
self._lock = threading.Lock()
self._cond = threading.Condition(self._lock)
    def put(self, result):
        with self._lock:
            assert self._result is None, 'Previous result hasn\'t been fetched.'
            self._result = result
            self._cond.notify()

    def get(self):
        with self._lock:
            # Loop rather than a single wait() to guard against spurious wakeups.
            while self._result is None:
                self._cond.wait()

            res = self._result
            self._result = None
            return res
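

# One-shot usage sketch: a producer thread fulfils the future while the
# consumer blocks on get().
def _future_result_example():
    fut = FutureResult()
    producer = threading.Thread(target=fut.put, args=(42,))
    producer.start()
    value = fut.get()  # blocks until put() runs, then returns 42
    producer.join()
    return value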
_MasterRegistry = collections.namedtuple('MasterRegistry', ['result'])
_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result'])
class SlavePipe(_SlavePipeBase):
"""Pipe for master-slave communication."""
def run_slave(self, msg):
self.queue.put((self.identifier, msg))
ret = self.result.get()
self.queue.put(True)
return ret
class SyncMaster(object):
"""An abstract `SyncMaster` object.
- During the replication, as the data parallel will trigger an callback of each module, all slave devices should
call `register(id)` and obtain an `SlavePipe` to communicate with the master.
- During the forward pass, master device invokes `run_master`, all messages from slave devices will be collected,
and passed to a registered callback.
- After receiving the messages, the master device should gather the information and determine to message passed
back to each slave devices.
"""
def __init__(self, master_callback):
"""
Args:
master_callback: a callback to be invoked after having collected messages from slave devices.
"""
self._master_callback = master_callback
self._queue = queue.Queue()
self._registry = collections.OrderedDict()
self._activated = False
def __getstate__(self):
return {'master_callback': self._master_callback}
def __setstate__(self, state):
self.__init__(state['master_callback'])
def register_slave(self, identifier):
"""
Register an slave device.
Args:
identifier: an identifier, usually is the device id.
Returns: a `SlavePipe` object which can be used to communicate with the master device.
"""
if self._activated:
assert self._queue.empty(), 'Queue is not clean before next initialization.'
self._activated = False
self._registry.clear()
future = FutureResult()
self._registry[identifier] = _MasterRegistry(future)
return SlavePipe(identifier, self._queue, future)
def run_master(self, master_msg):
"""
Main entry for the master device in each forward pass.
The messages were first collected from each devices (including the master device), and then
an callback will be invoked to compute the message to be sent back to each devices
(including the master device).
Args:
master_msg: the message that the master want to send to itself. This will be placed as the first
message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.
Returns: the message to be sent back to the master device.
"""
self._activated = True
intermediates = [(0, master_msg)]
for i in range(self.nr_slaves):
intermediates.append(self._queue.get())
results = self._master_callback(intermediates)
        assert results[0][0] == 0, 'The first result should belong to the master.'
for i, res in results:
if i == 0:
continue
self._registry[i].result.put(res)
for i in range(self.nr_slaves):
assert self._queue.get() is True
return results[0][1]
@property
def nr_slaves(self):
return len(self._registry)
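

if __name__ == '__main__':
    # Single-process sketch with one hypothetical slave device: the master
    # callback sums every collected message and sends the total back.
    def _sum_callback(intermediates):
        total = sum(msg for _, msg in intermediates)
        return [(identifier, total) for identifier, _ in intermediates]

    master = SyncMaster(_sum_callback)
    pipe = master.register_slave(identifier=1)

    slave_results = []
    slave = threading.Thread(target=lambda: slave_results.append(pipe.run_slave(2)))
    slave.start()
    print(master.run_master(1))  # 3 (= 1 from the master + 2 from the slave)
    slave.join()
    print(slave_results)         # [3]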
| 4,449
| 31.246377
| 117
|
py
|
taskgrouping
|
taskgrouping-master/sync_batchnorm/__init__.py
|
# -*- coding: utf-8 -*-
# File : __init__.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d
from .batchnorm import patch_sync_batchnorm, convert_model
from .replicate import DataParallelWithCallback, patch_replication_callback
| 507
| 35.285714
| 96
|
py
|
taskgrouping
|
taskgrouping-master/network_selection/make_plots.py
|
import plotly.graph_objects as go
import plotly.io as pio
from plotly.validators.scatter.marker import SymbolValidator
import plotly.express as px
#pio.templates.default = "plotly_dark"
# Add data
def make_plot(curves,name):
#color_list = ["#E69F00", "#56B4E9", "#009E73", "#F0E442", "#D55E00", "#0072B2", "#CC79A7"]
budget = [1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5]
name_to_name={'sener_et_al':'Sener et al.',
'gradnorm': 'GradNorm',
'worst':'Worst Network Choice',
'all_in_one':'Single Traditional<br>Multi-task Network',
                  'random':'Random Groupings',
'independent':'Five Independent Networks',
'esa':'ESA (ours) 5.3.1',
'hoa':'HOA (ours) 5.3.2',
'optimal':'Optimal Network<br>Choice (ours)' }
name_to_color={'sener_et_al':7,
'gradnorm':8,
'worst':0,
'all_in_one':1,
'random':2,
'independent':3,
'esa':4,
'hoa':5,
'optimal':6}
fig = go.Figure()
symbols=['circle','square','diamond','star','hexagram','star-triangle-up','asterisk','y-up','cross']
for i,(key,val) in enumerate(curves.items()):
fig.add_trace(go.Scatter(x=budget, y=val, name=name_to_name[key],connectgaps=True ,marker_symbol=name_to_color[key],marker_size=10,line=dict(color=px.colors.qualitative.G10[name_to_color[key]])))
#line=dict(color=)
# Create and style traces
# if 'sener_et_al' in curves:
# fig.add_trace(go.Scatter(x=budget, y=curves['sener_et_al'], name='Sener et al.',connectgaps=True ,))
# if 'gradnorm' in curves:
# fig.add_trace(go.Scatter(x=budget, y=curves['gradnorm'], name='GradNorm',connectgaps=True ))
# fig.add_trace(go.Scatter(x=budget, y=curves['worst'], name='Worst Network<br> Choice',connectgaps=True ))
# fig.add_trace(go.Scatter(x=budget, y=curves['all_in_one'], name='Single Traditional<br> Multi-task Network',connectgaps=True ))
# fig.add_trace(go.Scatter(x=budget, y=curves['random'], name='Random Groupings',connectgaps=True ))
# fig.add_trace(go.Scatter(x=budget, y=curves['independent'], name='Five Independent<br> Networks',connectgaps=True ))
# fig.add_trace(go.Scatter(x=budget, y=curves['esa'], name='ESA (ours) 3.3.1',connectgaps=True ))
# fig.add_trace(go.Scatter(x=budget, y=curves['hoa'], name='HOA (ours) 3.3.2',connectgaps=True ))
# fig.add_trace(go.Scatter(x=budget, y=curves['optimal'], name='Optimal Network<br> Choice (ours)',connectgaps=True ))
# Edit the layout
fig.update_layout(title=dict(text='Performance vs Compute', font=dict(size=22,color='black')),
xaxis_title=dict(text='Inference Time Cost',font=dict(size=18,color='black')),
yaxis_title=dict(text='Total Loss (lower is better)',font=dict(size=18,color='black')),
legend=dict(font=dict(color='black',size=16)),
#colorway=px.colors.qualitative.G10,
xaxis=dict(
showline=True,
showgrid=False,
showticklabels=True,
linecolor='rgb(0, 0, 0)',
linewidth=1,
ticks='outside',
tickfont=dict(
family='Arial',
size=15,
color='rgb(0, 0, 0)',
),
),
yaxis=dict(
showgrid=True,
#zeroline=False,
ticks='outside',
showline=True,
showticklabels=True,
linecolor='rgb(0, 0, 0)',
linewidth=1,
tickfont=dict(
family='Arial',
size=15,
color='rgb(0, 0, 0)',
),
),
autosize=False,
margin=dict(
autoexpand=False,
l=58,
r=240,
t=32,
b=47
),
width=600,
height=100+27*len(curves),
#showlegend=False,
plot_bgcolor='white'
)
fig.write_image('plots/'+name+'.pdf')
#fig.show()
curves_1=dict(
sener_et_al = [0.5621, None, 0.5556, None, None, None, 0.5471],
gradnorm = [0.5148, None, None, None, None, None, 0.5001],
worst = [0.50278, 0.50278, 0.50278, 0.50278, 0.50278, 0.50278, 0.50179, 0.50179, 0.49941],
all_in_one = [0.50273, None, 0.4916, 0.48873, None, None, 0.4883],
random = [0.50278, 0.485347, 0.473641, 0.469079, 0.465265, 0.46271, 0.460238, 0.458358, 0.456486],
independent = [0.51456, 0.50139, 0.47704, 0.46515, None, None, 0.45456, None, 0.44774],
esa = [0.50273, 0.48732, 0.46727, 0.46063, 0.45722, 0.45058, 0.45058, 0.44742, 0.44742],
hoa = [0.50278, 0.46132, 0.45474, 0.4505, 0.44875, 0.44489, 0.44112, 0.44552, 0.44196],
optimal = [0.50273, 0.46132, 0.45224, 0.44612, 0.44235, 0.43932, 0.43555, 0.43555, 0.43481],
)
curves_2=dict(
worst = [0.35989, 0.36554, 0.36926, 0.36936, 0.36956, 0.36956, 0.36956, 0.36956, 0.36956],
independent = [0.37276,0.35715,0.35926,0.36188,None,None,0.35384,None,0.35216],
all_in_one = [0.35989,None,0.35408,None,0.35431,None,0.35295,None,None],
random = [0.35989, 0.360109, 0.357285, 0.355924, 0.353176, 0.351664, 0.349508, 0.348102, 0.346303] ,
esa = [0.35989, 0.35989, 0.34696, 0.34696, 0.34483, 0.34483, 0.34483, 0.34483, 0.34483],
hoa = [0.35989, 0.35758, 0.31733, 0.31562, 0.31177, 0.30525, 0.3019, 0.3019, 0.30187],
optimal = [0.35989, 0.35478, 0.31733, 0.3145, 0.30606, 0.3049, 0.3019, 0.3019, 0.30167],
)
curves_3=dict(
worst = [0.42998, 0.47544, 0.47182, 0.47205, 0.4717, 0.47066, 0.46857, 0.46702, 0.46495] ,
all_in_one = [0.42998, None, None, None, None, None, 0.44391 ],
random = [0.42998, 0.439361, 0.439917, 0.435501, 0.431542, 0.427947, 0.424582, 0.421834, 0.419124],
independent = [0.41805, None, None, 0.4262, None, None, None, None, 0.40643],
esa = [0.42998, 0.44778, 0.43055, 0.39507, 0.40381, 0.39404, 0.40278, 0.39404, 0.40278] ,
hoa = [0.42998, 0.44778, 0.40887, 0.38776, 0.38352, 0.38682, 0.38574, 0.38574, 0.38471] ,
optimal = [0.42998, 0.42275, 0.40857, 0.38776, 0.38352, 0.38352, 0.38249, 0.38249, 0.38249],
)
curves_4=dict(
worst = [0.684042, 0.689178, 0.696036, 0.698235, 0.700446, 0.701056, 0.701056, 0.701056, 0.701056],
independent= [0.698867,None,None,0.692437,None,None,None,None,0.685578],
random = [0.684042, 0.683817, 0.681984, 0.681949, 0.680581, 0.6801, 0.679037, 0.678471, 0.677633] ,
esa = [0.684042, 0.684042, 0.677567, 0.680649, 0.677349, 0.676049, 0.676049, 0.675976, 0.675976] ,
all_in_one = [0.684042,None,None,None,None,None,0.672991],
hoa = [0.684042, 0.678697, 0.674597, 0.671067, 0.669696, 0.671867, 0.670496, 0.670496, 0.670496] ,
optimal = [0.684042, 0.678697, 0.674597, 0.671067, 0.669696, 0.669696, 0.668986, 0.668986, 0.668986],
)
make_plot(curves_1,'setting_1')
make_plot(curves_2,'setting_2')
make_plot(curves_3,'setting_3')
make_plot(curves_4,'setting_4')
| 7,507
| 45.925
| 203
|
py
|
taskgrouping
|
taskgrouping-master/model_definitions/ozan_rep_fun.py
|
import torch.autograd
import sys
import math
from .ozan_min_norm_solvers import MinNormSolver
import statistics
class OzanRepFunction(torch.autograd.Function):
# def __init__(self,copies,noop=False):
# super(OzanRepFunction,self).__init__()
# self.copies=copies
# self.noop=noop
n=5
def __init__(self):
super(OzanRepFunction, self).__init__()
@staticmethod
def forward(ctx, input):
        shape = input.shape
        ret = input.expand(OzanRepFunction.n, *shape)
        # .clone() is required here: expand() only returns a view, and
        # returning the view (without the clone) previously caused an error.
        return ret.clone()
#@staticmethod
# def backward(ctx, grad_output):
# # print("backward",grad_output.shape)
# # print()
# # print()
# if grad_output.shape[0]==2:
# theta0,theta1=grad_output[0].view(-1).float(), grad_output[1].view(-1).float()
# diff = theta0-theta1
# num = diff.dot(theta0)
# denom = (diff.dot(diff)+.00000001)
# a = num/denom
# a1=float(a)
# a = a.clamp(0,1)
# a = float(a)
# # print(float(a),a1,float(num),float(denom))
# # print()
# # print()
# def get_out_for_a(a):
# return grad_output[0]*(1-a)+grad_output[1]*a
# def get_score_for_a(a):
# out = get_out_for_a(a)
# vec = out.view(-1)
# score = vec.dot(vec)
# return float(score)
# # print(0,get_score_for_a(0),
# # .1,get_score_for_a(0.1),
# # .2,get_score_for_a(0.2),
# # .3,get_score_for_a(0.3),
# # .4,get_score_for_a(0.4),
# # .5,get_score_for_a(0.5),
# # .6,get_score_for_a(0.6),
# # .7,get_score_for_a(0.7),
# # .8,get_score_for_a(0.8),
# # .9,get_score_for_a(0.9),
# # 1,get_score_for_a(1))
# # print(a,get_score_for_a(a))
# # print()
# # print()
# out = get_out_for_a(a)
# #out=out*2
# elif grad_output.shape[0]==1:
# grad_input=grad_output.clone()
# out = grad_input.sum(dim=0)
# else:
# pass
# return out
@staticmethod
def backward(ctx, grad_output):
num_grads = grad_output.shape[0]
batch_size = grad_output.shape[1]
# print(num_grads)
# print(num_grads)
# print(num_grads)
#print(grad_output.shape)
# print(grad_output.shape)
# print(grad_output.shape)
# print(num_grads)
# print(num_grads)
if num_grads>=2:
#print ('shape in = ',grad_output[0].view(batch_size,-1).float().shape)
            try:
                alphas, score = MinNormSolver.find_min_norm_element([grad_output[i].view(batch_size,-1).float() for i in range(num_grads)])
                #print(alphas)
            except ValueError:
                # Fall back to uniform weights when the solver hits a numeric instability.
                alphas = [1/num_grads for i in range(num_grads)]
#print('outs shape',out.shape)
#print('alphas shape',alphas.shape)
#out = out.view()
#out = torch.zeros_like(grad_output[0])
# print(alphas)
# print()
# print()
grad_outputs = [grad_output[i]*alphas[i]*math.sqrt(num_grads) for i in range(num_grads)]
output = grad_outputs[0]
for i in range(1,num_grads):
output+=grad_outputs[i]
return output
        elif num_grads == 1:
            grad_input = grad_output.clone()
            out = grad_input.sum(dim=0)
        else:
            # An empty gradient stack would leave `out` undefined below.
            raise ValueError('backward() expected at least one gradient.')
        return out
ozan_rep_function = OzanRepFunction.apply
class TrevorRepFunction(torch.autograd.Function):
n=5
def __init__(self):
super(TrevorRepFunction, self).__init__()
@staticmethod
def forward(ctx, input):
return input.clone()
@staticmethod
def backward(ctx, grad_output):
#num_grads = grad_output.shape[0]
#print(num_grads)
grad_input=grad_output.clone()
mul = 1.0/math.sqrt(TrevorRepFunction.n)
out = grad_input * mul
return out
trevor_rep_function = TrevorRepFunction.apply
count = 0
class GradNormRepFunction(torch.autograd.Function):
n=5
inital_task_losses=None
current_task_losses=None
current_weights=None
def __init__(self):
super(GradNormRepFunction, self).__init__()
@staticmethod
def forward(ctx, input):
shape = input.shape
ret = input.expand(GradNormRepFunction.n,*shape)
return ret.clone()
@staticmethod
def backward(ctx, grad_output):
global count
num_grads = grad_output.shape[0]
batch_size = grad_output.shape[1]
grad_output=grad_output.float()
if num_grads>=2:
GiW = [torch.sqrt(grad_output[i].reshape(-1).dot(grad_output[i].reshape(-1)))*GradNormRepFunction.current_weights[i] for i in range(num_grads)]
GW_bar = torch.mean(torch.stack(GiW))
try:
Li_ratio=[c/max(i,.0000001) for c,i in zip(GradNormRepFunction.current_task_losses,GradNormRepFunction.inital_task_losses)]
mean_ratio = statistics.mean(Li_ratio)
ri = [lir/max(mean_ratio,.00000001) for lir in Li_ratio]
target_grad=[float(GW_bar * (max(r_i,.00000001)**1.5)) for r_i in ri]
target_weight= [float(target_grad[i]/float(GiW[i])) for i in range(num_grads)]
total_weight = sum(target_weight)
total_weight = max(.0000001,total_weight)
target_weight=[i*num_grads/total_weight for i in target_weight]
for i in range(len(GradNormRepFunction.current_weights)):
wi = GradNormRepFunction.current_weights[i]
GradNormRepFunction.current_weights[i]+=(.0001*wi if (wi<target_weight[i]) else -.0001*wi)
# print('Li_ratio',Li_ratio)
# print('mean_ratio',mean_ratio)
# print('ri',ri)
# print('target_weight',target_weight)
# print('current_weights',GradNormRepFunction.current_weights)
# print()
# print()
count+=1
if count % 80==0:
with open("gradnorm_weights.txt", "a") as myfile:
myfile.write('target: '+str(target_weight)+'\n')
total_weight = sum(GradNormRepFunction.current_weights)
total_weight = max(.0000001,total_weight)
GradNormRepFunction.current_weights = [i*num_grads/total_weight for i in GradNormRepFunction.current_weights]
            except Exception:
                # Keep training even if the weight update fails numerically.
                pass
grad_outputs = [grad_output[i]*GradNormRepFunction.current_weights[i]*(1/math.sqrt(num_grads)) for i in range(num_grads)]
output = grad_outputs[0]
for i in range(1,num_grads):
output+=grad_outputs[i]
return output.half()
        elif num_grads == 1:
            grad_input = grad_output.clone()
            out = grad_input.sum(dim=0)
        else:
            # An empty gradient stack would leave `out` undefined below.
            raise ValueError('backward() expected at least one gradient.')
        return out
gradnorm_rep_function = GradNormRepFunction.apply
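

if __name__ == '__main__':
    # Toy sketch (hypothetical shapes): replicate a shared representation for
    # two task heads; backward() then combines the per-task gradients via the
    # min-norm solver instead of plain summation.
    OzanRepFunction.n = 2
    rep = torch.randn(4, 8, requires_grad=True)        # shared representation
    reps = ozan_rep_function(rep)                      # shape (2, 4, 8), one copy per task
    loss = reps[0].pow(2).sum() + reps[1].abs().sum()  # two toy task losses
    loss.backward()
    print(rep.grad.shape)                              # torch.Size([4, 8])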
| 7,442
| 34.442857
| 155
|
py
|
taskgrouping
|
taskgrouping-master/model_definitions/xception_taskonomy_small.py
|
"""
Creates an Xception Model as defined in:
Francois Chollet
Xception: Deep Learning with Depthwise Separable Convolutions
https://arxiv.org/pdf/1610.02357.pdf
These weights were ported from the Keras implementation. They achieve the following performance on the validation set:
Loss: 0.9173, Prec@1: 78.892, Prec@5: 94.292
REMEMBER to set your image size to 3x299x299 for both test and validation
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299
"""
import math
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch.nn import init
import torch
from .ozan_rep_fun import ozan_rep_function,trevor_rep_function,OzanRepFunction,TrevorRepFunction,gradnorm_rep_function,GradNormRepFunction
__all__ = ['xception_taskonomy_small','xception_taskonomy_small_gradnorm','xception_taskonomy_small_ozan','xception_taskonomy_small_half','xception_taskonomy_small_quad','xception_taskonomy_small_double','xception_taskonomy_small_double_ozan','xception_taskonomy_small_half_ozan','xception_taskonomy_small_quad_ozan']
# model_urls = {
# 'xception_taskonomy':'file:///home/tstand/Dropbox/taskonomy/xception_taskonomy-a4b32ef7.pth.tar'
# }
class SeparableConv2d(nn.Module):
def __init__(self,in_channels,out_channels,kernel_size=1,stride=1,padding=0,dilation=1,bias=False,groupsize=1):
super(SeparableConv2d,self).__init__()
self.conv1 = nn.Conv2d(in_channels,in_channels,kernel_size,stride,padding,dilation,groups=max(1,in_channels//groupsize),bias=bias)
self.pointwise = nn.Conv2d(in_channels,out_channels,1,1,0,1,1,bias=bias)
def forward(self,x):
x = self.conv1(x)
x = self.pointwise(x)
return x
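

# Quick shape sketch (hypothetical sizes): with the default groupsize=1 the
# first conv is fully depthwise (groups == in_channels), and the following
# 1x1 pointwise conv mixes channels; padding=1 keeps the spatial dims.
def _separable_conv_shape_example():
    sep = SeparableConv2d(16, 32, kernel_size=3, padding=1)
    return sep(torch.randn(2, 16, 64, 64)).shape  # torch.Size([2, 32, 64, 64])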
class Block(nn.Module):
def __init__(self,in_filters,out_filters,reps,strides=1,start_with_relu=True,grow_first=True):
super(Block, self).__init__()
if out_filters != in_filters or strides!=1:
self.skip = nn.Conv2d(in_filters,out_filters,1,stride=strides, bias=False)
self.skipbn = nn.BatchNorm2d(out_filters)
else:
self.skip=None
self.relu = nn.ReLU(inplace=True)
rep=[]
filters=in_filters
if grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=1,bias=False))
rep.append(nn.BatchNorm2d(out_filters))
filters = out_filters
for i in range(reps-1):
rep.append(self.relu)
rep.append(SeparableConv2d(filters,filters,3,stride=1,padding=1,bias=False))
rep.append(nn.BatchNorm2d(filters))
if not grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=1,bias=False))
rep.append(nn.BatchNorm2d(out_filters))
filters=out_filters
if not start_with_relu:
rep = rep[1:]
else:
rep[0] = nn.ReLU(inplace=False)
if strides != 1:
#rep.append(nn.AvgPool2d(3,strides,1))
rep.append(nn.Conv2d(filters,filters,2,2))
self.rep = nn.Sequential(*rep)
def forward(self,inp):
x = self.rep(inp)
if self.skip is not None:
skip = self.skip(inp)
skip = self.skipbn(skip)
else:
skip = inp
x+=skip
return x
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
self.conv1 = nn.Conv2d(3, 24, 3,2, 1, bias=False)
self.bn1 = nn.BatchNorm2d(24)
self.relu = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=False)
self.conv2 = nn.Conv2d(24,48,3,1,1,bias=False)
self.bn2 = nn.BatchNorm2d(48)
#do relu here
self.block1=Block(48,96,2,2,start_with_relu=False,grow_first=True)
self.block2=Block(96,192,2,2,start_with_relu=True,grow_first=True)
self.block3=Block(192,512,2,2,start_with_relu=True,grow_first=True)
#self.block4=Block(768,768,3,1,start_with_relu=True,grow_first=True)
#self.block5=Block(768,768,3,1,start_with_relu=True,grow_first=True)
# self.block6=Block(768,768,3,1,start_with_relu=True,grow_first=True)
# self.block7=Block(768,768,3,1,start_with_relu=True,grow_first=True)
self.block8=Block(512,512,2,1,start_with_relu=True,grow_first=True)
self.block9=Block(512,512,2,1,start_with_relu=True,grow_first=True)
self.block10=Block(512,512,2,1,start_with_relu=True,grow_first=True)
self.block11=Block(512,512,2,1,start_with_relu=True,grow_first=True)
#self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)
self.conv3 = SeparableConv2d(512,256,3,1,1)
self.bn3 = nn.BatchNorm2d(256)
#self.conv3 = SeparableConv2d(1024,1536,3,1,1)
#self.bn3 = nn.BatchNorm2d(1536)
#do relu here
#self.conv4 = SeparableConv2d(1536,2048,3,1,1)
#self.bn4 = nn.BatchNorm2d(2048)
def forward(self,input):
x = self.conv1(input)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
# x = self.block4(x)
# x = self.block5(x)
# x = self.block6(x)
# x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
#x = self.block12(x)
x = self.conv3(x)
x = self.bn3(x)
#x = self.relu(x)
#x = self.conv4(x)
#x = self.bn4(x)
representation = self.relu2(x)
return representation
class EncoderHalf(nn.Module):
def __init__(self):
super(EncoderHalf, self).__init__()
self.conv1 = nn.Conv2d(3, 24, 3,2, 1, bias=False)
self.bn1 = nn.BatchNorm2d(24)
self.relu = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=False)
self.conv2 = nn.Conv2d(24,48,3,1,1,bias=False)
self.bn2 = nn.BatchNorm2d(48)
#do relu here
self.block1=Block(48,64,2,2,start_with_relu=False,grow_first=True)
self.block2=Block(64,128,2,2,start_with_relu=True,grow_first=True)
self.block3=Block(128,360,2,2,start_with_relu=True,grow_first=True)
#self.block4=Block(768,768,3,1,start_with_relu=True,grow_first=True)
#self.block5=Block(768,768,3,1,start_with_relu=True,grow_first=True)
# self.block6=Block(768,768,3,1,start_with_relu=True,grow_first=True)
# self.block7=Block(768,768,3,1,start_with_relu=True,grow_first=True)
self.block8=Block(360,360,2,1,start_with_relu=True,grow_first=True)
self.block9=Block(360,360,2,1,start_with_relu=True,grow_first=True)
self.block10=Block(360,360,2,1,start_with_relu=True,grow_first=True)
self.block11=Block(360,360,2,1,start_with_relu=True,grow_first=True)
#self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)
self.conv3 = SeparableConv2d(360,256,3,1,1)
self.bn3 = nn.BatchNorm2d(256)
#self.conv3 = SeparableConv2d(1024,1536,3,1,1)
#self.bn3 = nn.BatchNorm2d(1536)
#do relu here
#self.conv4 = SeparableConv2d(1536,2048,3,1,1)
#self.bn4 = nn.BatchNorm2d(2048)
def forward(self,input):
x = self.conv1(input)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
# x = self.block4(x)
# x = self.block5(x)
# x = self.block6(x)
# x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
#x = self.block12(x)
x = self.conv3(x)
x = self.bn3(x)
#x = self.relu(x)
#x = self.conv4(x)
#x = self.bn4(x)
representation = self.relu2(x)
return representation
class EncoderQuad(nn.Module):
def __init__(self):
super(EncoderQuad, self).__init__()
print('entering quad constructor')
self.conv1 = nn.Conv2d(3, 48, 3,2, 1, bias=False)
self.bn1 = nn.BatchNorm2d(48)
self.relu = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=False)
self.conv2 = nn.Conv2d(48,96,3,1,1,bias=False)
self.bn2 = nn.BatchNorm2d(96)
#do relu here
self.block1=Block(96,192,2,2,start_with_relu=False,grow_first=True)
self.block2=Block(192,384,2,2,start_with_relu=True,grow_first=True)
self.block3=Block(384,1024,2,2,start_with_relu=True,grow_first=True)
#self.block4=Block(768,768,3,1,start_with_relu=True,grow_first=True)
#self.block5=Block(768,768,3,1,start_with_relu=True,grow_first=True)
# self.block6=Block(768,768,3,1,start_with_relu=True,grow_first=True)
# self.block7=Block(768,768,3,1,start_with_relu=True,grow_first=True)
self.block8=Block(1024,1024,2,1,start_with_relu=True,grow_first=True)
self.block9=Block(1024,1024,2,1,start_with_relu=True,grow_first=True)
self.block10=Block(1024,1024,2,1,start_with_relu=True,grow_first=True)
self.block11=Block(1024,1024,2,1,start_with_relu=True,grow_first=True)
#self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)
self.conv3 = SeparableConv2d(1024,256,3,1,1)
self.bn3 = nn.BatchNorm2d(256)
#self.conv3 = SeparableConv2d(1024,1536,3,1,1)
#self.bn3 = nn.BatchNorm2d(1536)
#do relu here
#self.conv4 = SeparableConv2d(1536,2048,3,1,1)
#self.bn4 = nn.BatchNorm2d(2048)
def forward(self,input):
x = self.conv1(input)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
# x = self.block4(x)
# x = self.block5(x)
# x = self.block6(x)
# x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
#x = self.block12(x)
x = self.conv3(x)
x = self.bn3(x)
#x = self.relu(x)
#x = self.conv4(x)
#x = self.bn4(x)
representation = self.relu2(x)
return representation
class EncoderDouble(nn.Module):
def __init__(self):
super(EncoderDouble, self).__init__()
print('entering double constructor')
self.conv1 = nn.Conv2d(3, 32, 3,2, 1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.relu = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=False)
self.conv2 = nn.Conv2d(32,64,3,1,1,bias=False)
self.bn2 = nn.BatchNorm2d(64)
#do relu here
self.block1=Block(64,128,2,2,start_with_relu=False,grow_first=True)
self.block2=Block(128,256,2,2,start_with_relu=True,grow_first=True)
self.block3=Block(256,728,2,2,start_with_relu=True,grow_first=True)
#self.block4=Block(768,768,3,1,start_with_relu=True,grow_first=True)
#self.block5=Block(768,768,3,1,start_with_relu=True,grow_first=True)
# self.block6=Block(768,768,3,1,start_with_relu=True,grow_first=True)
# self.block7=Block(768,768,3,1,start_with_relu=True,grow_first=True)
self.block8=Block(728,728,2,1,start_with_relu=True,grow_first=True)
self.block9=Block(728,728,2,1,start_with_relu=True,grow_first=True)
self.block10=Block(728,728,2,1,start_with_relu=True,grow_first=True)
self.block11=Block(728,728,2,1,start_with_relu=True,grow_first=True)
#self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)
self.conv3 = SeparableConv2d(728,256,3,1,1)
self.bn3 = nn.BatchNorm2d(256)
#self.conv3 = SeparableConv2d(1024,1536,3,1,1)
#self.bn3 = nn.BatchNorm2d(1536)
#do relu here
#self.conv4 = SeparableConv2d(1536,2048,3,1,1)
#self.bn4 = nn.BatchNorm2d(2048)
def forward(self,input):
x = self.conv1(input)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
# x = self.block4(x)
# x = self.block5(x)
# x = self.block6(x)
# x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
#x = self.block12(x)
x = self.conv3(x)
x = self.bn3(x)
#x = self.relu(x)
#x = self.conv4(x)
#x = self.bn4(x)
representation = self.relu2(x)
return representation
def interpolate(inp, size):
    # Upsample in fp32 and cast back afterwards (this assumes half-precision
    # inputs are the only case where the types differ).
    t = inp.type()
    inp = inp.float()
    out = nn.functional.interpolate(inp, size=size, mode='bilinear', align_corners=False)
    if out.type() != t:
        out = out.half()
    return out
class Decoder(nn.Module):
def __init__(self, output_channels=32,num_classes=None):
super(Decoder, self).__init__()
self.output_channels = output_channels
self.num_classes = num_classes
if num_classes is not None:
self.fc = nn.Linear(256, num_classes)
#else:
# self.fc = nn.Linear(256, 1000)
else:
self.relu = nn.ReLU(inplace=True)
self.conv_decode_res = SeparableConv2d(256, 16, 3,padding=1)
self.conv_decode_res2 = SeparableConv2d(256, 96, 3,padding=1)
self.bn_conv_decode_res = nn.BatchNorm2d(16)
self.bn_conv_decode_res2 = nn.BatchNorm2d(96)
self.upconv1 = nn.ConvTranspose2d(96,96,2,2)
self.bn_upconv1 = nn.BatchNorm2d(96)
self.conv_decode1 = SeparableConv2d(96, 64, 3,padding=1)
self.bn_decode1 = nn.BatchNorm2d(64)
self.upconv2 = nn.ConvTranspose2d(64,64,2,2)
self.bn_upconv2 = nn.BatchNorm2d(64)
self.conv_decode2 = SeparableConv2d(64, 64, 5,padding=2)
self.bn_decode2 = nn.BatchNorm2d(64)
self.upconv3 = nn.ConvTranspose2d(64,32,2,2)
self.bn_upconv3 = nn.BatchNorm2d(32)
self.conv_decode3 = SeparableConv2d(32, 32, 5,padding=2)
self.bn_decode3 = nn.BatchNorm2d(32)
self.upconv4 = nn.ConvTranspose2d(32,32,2,2)
self.bn_upconv4 = nn.BatchNorm2d(32)
self.conv_decode4 = SeparableConv2d(48, output_channels, 5,padding=2)
def forward(self,representation):
#batch_size=representation.shape[0]
if self.num_classes is None:
x2 = self.conv_decode_res(representation)
x2 = self.bn_conv_decode_res(x2)
x2 = interpolate(x2,size=(256,256))
x = self.conv_decode_res2(representation)
x = self.bn_conv_decode_res2(x)
x = self.upconv1(x)
x = self.bn_upconv1(x)
x = self.relu(x)
x = self.conv_decode1(x)
x = self.bn_decode1(x)
x = self.relu(x)
x = self.upconv2(x)
x = self.bn_upconv2(x)
x = self.relu(x)
x = self.conv_decode2(x)
x = self.bn_decode2(x)
x = self.relu(x)
x = self.upconv3(x)
x = self.bn_upconv3(x)
x = self.relu(x)
x = self.conv_decode3(x)
x = self.bn_decode3(x)
x = self.relu(x)
x = self.upconv4(x)
x = self.bn_upconv4(x)
x = torch.cat([x,x2],1)
#print(x.shape,self.static.shape)
#x = torch.cat([x,x2,input,self.static.expand(batch_size,-1,-1,-1)],1)
x = self.relu(x)
x = self.conv_decode4(x)
#z = x[:,19:22,:,:].clone()
#y = (z).norm(2,1,True).clamp(min=1e-12)
#print(y.shape,x[:,21:24,:,:].shape)
#x[:,19:22,:,:]=z/y
else:
#print(representation.shape)
x = F.adaptive_avg_pool2d(representation, (1, 1))
x = x.view(x.size(0), -1)
#print(x.shape)
x = self.fc(x)
#print(x.shape)
return x
class XceptionTaskonomySmall(nn.Module):
"""
Xception optimized for the ImageNet dataset, as specified in
https://arxiv.org/pdf/1610.02357.pdf
"""
def __init__(self, tasks=None,num_classes=None, ozan=False, half=False):
""" Constructor
Args:
num_classes: number of classes
"""
super(XceptionTaskonomySmall, self).__init__()
print('half is',half)
if half=='Quad':
print('running quad code')
self.encoder=EncoderQuad()
elif half == 'Double':
self.encoder=EncoderDouble()
elif half:
self.encoder=EncoderHalf()
else:
self.encoder=Encoder()
self.tasks=tasks
self.ozan=ozan
self.task_to_decoder = {}
if tasks is not None:
for task in tasks:
                if task == 'segment_semantic':
                    output_channels = 18
                elif task == 'depth_zbuffer':
                    output_channels = 1
                elif task == 'normal':
                    output_channels = 3
                elif task == 'normal2':
                    output_channels = 3
                elif task == 'edge_occlusion':
                    output_channels = 1
                elif task == 'reshading':
                    output_channels = 3
                elif task == 'keypoints2d':
                    output_channels = 1
                elif task == 'edge_texture':
                    output_channels = 1
decoder=Decoder(output_channels,num_classes)
self.task_to_decoder[task]=decoder
else:
self.task_to_decoder['classification']=Decoder(output_channels=0,num_classes=1000)
self.decoders = nn.ModuleList(self.task_to_decoder.values())
#------- init weights --------
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
#-----------------------------
count=0
def input_per_task_losses(self,losses):
# if GradNormRepFunction.inital_task_losses is None:
# GradNormRepFunction.inital_task_losses=losses
# GradNormRepFunction.current_weights=[1 for i in losses]
XceptionTaskonomySmall.count+=1
if XceptionTaskonomySmall.count < 200:
GradNormRepFunction.inital_task_losses=losses
GradNormRepFunction.current_weights=[1 for i in losses]
elif XceptionTaskonomySmall.count%20==0:
with open("gradnorm_weights.txt", "a") as myfile:
myfile.write(str(XceptionTaskonomySmall.count)+': '+str(GradNormRepFunction.current_weights)+'\n')
GradNormRepFunction.current_task_losses=losses
def forward(self, input):
rep = self.encoder(input)
if self.tasks is None:
return self.decoders[0](rep)
outputs={'rep':rep}
if self.ozan=='gradnorm':
GradNormRepFunction.n=len(self.decoders)
rep = gradnorm_rep_function(rep)
for i,(task,decoder) in enumerate(zip(self.task_to_decoder.keys(),self.decoders)):
outputs[task]=decoder(rep[i])
elif self.ozan:
OzanRepFunction.n=len(self.decoders)
rep = ozan_rep_function(rep)
for i,(task,decoder) in enumerate(zip(self.task_to_decoder.keys(),self.decoders)):
outputs[task]=decoder(rep[i])
else:
for i,(task,decoder) in enumerate(zip(self.task_to_decoder.keys(),self.decoders)):
outputs[task]=decoder(rep)
return outputs
def xception_taskonomy_small(pretrained=False,**kwargs):
"""
Construct Xception.
"""
# try:
# num_classes = kwargs['num_classes']
# except:
# num_classes=1000
# if pretrained:
# kwargs['num_classes']=1000
model = XceptionTaskonomySmall(**kwargs)
if pretrained:
#state_dict = model_zoo.load_url(model_urls['xception_taskonomy'])
# for name,weight in state_dict.items():
# if 'pointwise' in name:
# state_dict[name]=weight.unsqueeze(-1).unsqueeze(-1)
# if 'conv1' in name and len(weight.shape)!=4:
# state_dict[name]=weight.unsqueeze(1)
#model.load_state_dict(state_dict)
#model.load_state_dict(torch.load('xception_taskonomy_small_imagenet_pretrained.pth.tar'))
model.encoder.load_state_dict(torch.load('xception_taskonomy_small2.encoder.pth.tar'))
# if num_classes !=1000:
# model.fc = nn.Linear(2048, num_classes)
# import torch
# print("writing new state dict")
# torch.save(model.state_dict(),"xception.pth.tar")
# print("done")
# import sys
# sys.exit(1)
return model
def xception_taskonomy_small_ozan(pretrained=False,**kwargs):
"""
Construct Xception.
"""
model = XceptionTaskonomySmall(ozan=True,**kwargs)
if pretrained:
#model.load_state_dict(torch.load('xception_taskonomy_small_imagenet_pretrained.pth.tar'))
model.encoder.load_state_dict(torch.load('xception_taskonomy_small2.encoder.pth.tar'))
return model
def xception_taskonomy_small_gradnorm(pretrained=False,**kwargs):
"""
Construct Xception.
"""
model = XceptionTaskonomySmall(ozan='gradnorm',**kwargs)
if pretrained:
#model.load_state_dict(torch.load('xception_taskonomy_small_imagenet_pretrained.pth.tar'))
model.encoder.load_state_dict(torch.load('xception_taskonomy_small2.encoder.pth.tar'))
return model
def xception_taskonomy_small_half(pretrained=False,**kwargs):
"""
Construct Xception.
"""
# try:
# num_classes = kwargs['num_classes']
# except:
# num_classes=1000
# if pretrained:
# kwargs['num_classes']=1000
model = XceptionTaskonomySmall(half=True,**kwargs)
if pretrained:
#state_dict = model_zoo.load_url(model_urls['xception_taskonomy'])
# for name,weight in state_dict.items():
# if 'pointwise' in name:
# state_dict[name]=weight.unsqueeze(-1).unsqueeze(-1)
# if 'conv1' in name and len(weight.shape)!=4:
# state_dict[name]=weight.unsqueeze(1)
#model.load_state_dict(state_dict)
#model.load_state_dict(torch.load('xception_taskonomy_small_imagenet_pretrained.pth.tar'))
model.encoder.load_state_dict(torch.load('xception_taskonomy_small2.encoder.pth.tar'))
# if num_classes !=1000:
# model.fc = nn.Linear(2048, num_classes)
# import torch
# print("writing new state dict")
# torch.save(model.state_dict(),"xception.pth.tar")
# print("done")
# import sys
# sys.exit(1)
return model
def xception_taskonomy_small_quad(pretrained=False,**kwargs):
"""
Construct Xception.
"""
# try:
# num_classes = kwargs['num_classes']
# except:
# num_classes=1000
# if pretrained:
# kwargs['num_classes']=1000
print('got quad')
model = XceptionTaskonomySmall(half='Quad',**kwargs)
if pretrained:
#state_dict = model_zoo.load_url(model_urls['xception_taskonomy'])
# for name,weight in state_dict.items():
# if 'pointwise' in name:
# state_dict[name]=weight.unsqueeze(-1).unsqueeze(-1)
# if 'conv1' in name and len(weight.shape)!=4:
# state_dict[name]=weight.unsqueeze(1)
#model.load_state_dict(state_dict)
#model.load_state_dict(torch.load('xception_taskonomy_small_imagenet_pretrained.pth.tar'))
model.encoder.load_state_dict(torch.load('xception_taskonomy_small2.encoder.pth.tar'))
# if num_classes !=1000:
# model.fc = nn.Linear(2048, num_classes)
# import torch
# print("writing new state dict")
# torch.save(model.state_dict(),"xception.pth.tar")
# print("done")
# import sys
# sys.exit(1)
return model
def xception_taskonomy_small_double(pretrained=False,**kwargs):
"""
Construct Xception.
"""
# try:
# num_classes = kwargs['num_classes']
# except:
# num_classes=1000
# if pretrained:
# kwargs['num_classes']=1000
print('got double')
model = XceptionTaskonomySmall(half='Double',**kwargs)
if pretrained:
#state_dict = model_zoo.load_url(model_urls['xception_taskonomy'])
# for name,weight in state_dict.items():
# if 'pointwise' in name:
# state_dict[name]=weight.unsqueeze(-1).unsqueeze(-1)
# if 'conv1' in name and len(weight.shape)!=4:
# state_dict[name]=weight.unsqueeze(1)
#model.load_state_dict(state_dict)
#model.load_state_dict(torch.load('xception_taskonomy_small_imagenet_pretrained.pth.tar'))
model.encoder.load_state_dict(torch.load('xception_taskonomy_small2.encoder.pth.tar'))
# if num_classes !=1000:
# model.fc = nn.Linear(2048, num_classes)
# import torch
# print("writing new state dict")
# torch.save(model.state_dict(),"xception.pth.tar")
# print("done")
# import sys
# sys.exit(1)
return model
def xception_taskonomy_small_quad_ozan(pretrained=False,**kwargs):
"""
Construct Xception.
"""
# try:
# num_classes = kwargs['num_classes']
# except:
# num_classes=1000
# if pretrained:
# kwargs['num_classes']=1000
print('got quad ozan')
model = XceptionTaskonomySmall(ozan=True,half='Quad',**kwargs)
if pretrained:
#state_dict = model_zoo.load_url(model_urls['xception_taskonomy'])
# for name,weight in state_dict.items():
# if 'pointwise' in name:
# state_dict[name]=weight.unsqueeze(-1).unsqueeze(-1)
# if 'conv1' in name and len(weight.shape)!=4:
# state_dict[name]=weight.unsqueeze(1)
#model.load_state_dict(state_dict)
#model.load_state_dict(torch.load('xception_taskonomy_small_imagenet_pretrained.pth.tar'))
model.encoder.load_state_dict(torch.load('xception_taskonomy_small2.encoder.pth.tar'))
# if num_classes !=1000:
# model.fc = nn.Linear(2048, num_classes)
# import torch
# print("writing new state dict")
# torch.save(model.state_dict(),"xception.pth.tar")
# print("done")
# import sys
# sys.exit(1)
return model
def xception_taskonomy_small_double_ozan(pretrained=False,**kwargs):
"""
Construct Xception.
"""
# try:
# num_classes = kwargs['num_classes']
# except:
# num_classes=1000
# if pretrained:
# kwargs['num_classes']=1000
print('got double')
model = XceptionTaskonomySmall(ozan=True,half='Double',**kwargs)
if pretrained:
#state_dict = model_zoo.load_url(model_urls['xception_taskonomy'])
# for name,weight in state_dict.items():
# if 'pointwise' in name:
# state_dict[name]=weight.unsqueeze(-1).unsqueeze(-1)
# if 'conv1' in name and len(weight.shape)!=4:
# state_dict[name]=weight.unsqueeze(1)
#model.load_state_dict(state_dict)
#model.load_state_dict(torch.load('xception_taskonomy_small_imagenet_pretrained.pth.tar'))
model.encoder.load_state_dict(torch.load('xception_taskonomy_small2.encoder.pth.tar'))
# if num_classes !=1000:
# model.fc = nn.Linear(2048, num_classes)
# import torch
# print("writing new state dict")
# torch.save(model.state_dict(),"xception.pth.tar")
# print("done")
# import sys
# sys.exit(1)
return model
def xception_taskonomy_small_half_ozan(pretrained=False,**kwargs):
"""
Construct Xception.
"""
model = XceptionTaskonomySmall(ozan=True,half=True,**kwargs)
if pretrained:
#model.load_state_dict(torch.load('xception_taskonomy_small_imagenet_pretrained.pth.tar'))
model.encoder.load_state_dict(torch.load('xception_taskonomy_small2.encoder.pth.tar'))
return model
| 29,185
| 34.37697
| 317
|
py
|
taskgrouping
|
taskgrouping-master/model_definitions/resnet_taskonomy.py
|
import torch.nn as nn
import math
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch.nn import init
import torch
from .ozan_rep_fun import ozan_rep_function,trevor_rep_function,OzanRepFunction,TrevorRepFunction
#from .utils import load_state_dict_from_url
__all__ = ['resnet18_taskonomy','resnet18_taskonomy_half','resnet18_taskonomy_tripple', 'resnet34_taskonomy', 'resnet50_taskonomy', 'resnet101_taskonomy',
'resnet152_taskonomy']
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNetEncoder(nn.Module):
def __init__(self, block, layers,widths=[64,128,256,512], num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNetEncoder, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, widths[0], layers[0])
self.layer2 = self._make_layer(block, widths[1], layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, widths[2], layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, widths[3], layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
class Decoder(nn.Module):
def __init__(self, output_channels=32,num_classes=None,base_match=512):
super(Decoder, self).__init__()
self.output_channels = output_channels
self.num_classes = num_classes
self.relu = nn.ReLU(inplace=True)
        if num_classes is not None:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            # `base_match` is the channel count of the encoder output
            # (512 for the default BasicBlock ResNet-18 configuration).
            self.fc = nn.Linear(base_match, num_classes)
else:
self.upconv0 = nn.ConvTranspose2d(base_match,256,2,2)
self.bn_upconv0 = nn.BatchNorm2d(256)
self.conv_decode0 = nn.Conv2d(256, 256, 3,padding=1)
self.bn_decode0 = nn.BatchNorm2d(256)
self.upconv1 = nn.ConvTranspose2d(256,128,2,2)
self.bn_upconv1 = nn.BatchNorm2d(128)
self.conv_decode1 = nn.Conv2d(128, 128, 3,padding=1)
self.bn_decode1 = nn.BatchNorm2d(128)
self.upconv2 = nn.ConvTranspose2d(128,64,2,2)
self.bn_upconv2 = nn.BatchNorm2d(64)
self.conv_decode2 = nn.Conv2d(64, 64, 3,padding=1)
self.bn_decode2 = nn.BatchNorm2d(64)
self.upconv3 = nn.ConvTranspose2d(64,48,2,2)
self.bn_upconv3 = nn.BatchNorm2d(48)
self.conv_decode3 = nn.Conv2d(48, 48, 3,padding=1)
self.bn_decode3 = nn.BatchNorm2d(48)
self.upconv4 = nn.ConvTranspose2d(48,32,2,2)
self.bn_upconv4 = nn.BatchNorm2d(32)
self.conv_decode4 = nn.Conv2d(32, output_channels, 3,padding=1)
def forward(self,representation):
#batch_size=representation.shape[0]
if self.num_classes is None:
#x2 = self.conv_decode_res(representation)
#x2 = self.bn_conv_decode_res(x2)
#x2 = interpolate(x2,size=(256,256))
x = self.upconv0(representation)
x = self.bn_upconv0(x)
x = self.relu(x)
x = self.conv_decode0(x)
x = self.bn_decode0(x)
x = self.relu(x)
x = self.upconv1(x)
x = self.bn_upconv1(x)
x = self.relu(x)
x = self.conv_decode1(x)
x = self.bn_decode1(x)
x = self.relu(x)
x = self.upconv2(x)
x = self.bn_upconv2(x)
x = self.relu(x)
x = self.conv_decode2(x)
x = self.bn_decode2(x)
x = self.relu(x)
x = self.upconv3(x)
x = self.bn_upconv3(x)
x = self.relu(x)
x = self.conv_decode3(x)
x = self.bn_decode3(x)
x = self.relu(x)
x = self.upconv4(x)
x = self.bn_upconv4(x)
#x = torch.cat([x,x2],1)
#print(x.shape,self.static.shape)
#x = torch.cat([x,x2,input,self.static.expand(batch_size,-1,-1,-1)],1)
x = self.relu(x)
x = self.conv_decode4(x)
#z = x[:,19:22,:,:].clone()
#y = (z).norm(2,1,True).clamp(min=1e-12)
#print(y.shape,x[:,21:24,:,:].shape)
#x[:,19:22,:,:]=z/y
        else:
            x = F.adaptive_avg_pool2d(representation, (1, 1))
            x = x.view(x.size(0), -1)
            x = self.fc(x)
return x
class ResNet(nn.Module):
def __init__(self,block,layers,tasks=None,num_classes=None, ozan=False,size=1,**kwargs):
super(ResNet, self).__init__()
if size==1:
self.encoder=ResNetEncoder(block,layers,**kwargs)
elif size==2:
self.encoder=ResNetEncoder(block,layers,[96,192,384,720],**kwargs)
elif size==3:
self.encoder=ResNetEncoder(block,layers,[112,224,448,880],**kwargs)
elif size==0.5:
self.encoder=ResNetEncoder(block,layers,[48,96,192,360],**kwargs)
self.tasks=tasks
self.ozan=ozan
self.task_to_decoder = {}
if tasks is not None:
#self.final_conv = nn.Conv2d(728,512,3,1,1)
#self.final_conv_bn = nn.BatchNorm2d(512)
for task in tasks:
                if task == 'segment_semantic':
                    output_channels = 18
                elif task == 'depth_zbuffer':
                    output_channels = 1
                elif task == 'normal':
                    output_channels = 3
                elif task == 'edge_occlusion':
                    output_channels = 1
                elif task == 'reshading':
                    output_channels = 3
                elif task == 'keypoints2d':
                    output_channels = 1
                elif task == 'edge_texture':
                    output_channels = 1
if size==1:
decoder=Decoder(output_channels)
elif size==2:
decoder=Decoder(output_channels,base_match=720)
elif size==3:
decoder=Decoder(output_channels,base_match=880)
elif size==0.5:
decoder=Decoder(output_channels,base_match=360)
self.task_to_decoder[task]=decoder
else:
self.task_to_decoder['classification']=Decoder(output_channels=0,num_classes=1000)
self.decoders = nn.ModuleList(self.task_to_decoder.values())
#------- init weights --------
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
#-----------------------------
def forward(self, input):
rep = self.encoder(input)
if self.tasks is None:
return self.decoders[0](rep)
#rep = self.final_conv(rep)
#rep = self.final_conv_bn(rep)
outputs={'rep':rep}
if self.ozan:
OzanRepFunction.n=len(self.decoders)
rep = ozan_rep_function(rep)
for i,(task,decoder) in enumerate(zip(self.task_to_decoder.keys(),self.decoders)):
outputs[task]=decoder(rep[i])
else:
TrevorRepFunction.n=len(self.decoders)
rep = trevor_rep_function(rep)
for i,(task,decoder) in enumerate(zip(self.task_to_decoder.keys(),self.decoders)):
outputs[task]=decoder(rep)
return outputs
def _resnet(arch, block, layers, pretrained, **kwargs):
model = ResNet(block=block, layers=layers, **kwargs)
# if pretrained:
# state_dict = load_state_dict_from_url(model_urls[arch],
# progress=progress)
# model.load_state_dict(state_dict)
return model
def resnet18_taskonomy(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (note: weight loading is currently commented out in `_resnet`)
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained,
**kwargs)
def resnet18_taskonomy_tripple(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained,size=3,
**kwargs)
def resnet18_taskonomy_half(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained,size=0.5,
**kwargs)
def resnet34_taskonomy(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (note: weight loading is currently commented out in `_resnet`)
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained,
**kwargs)
def resnet50_taskonomy(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (note: weight loading is currently commented out in `_resnet`)
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained,
**kwargs)
def resnet101_taskonomy(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (note: weight loading is currently commented out in `_resnet`)
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained,
**kwargs)
def resnet152_taskonomy(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (note: weight loading is currently commented out in `_resnet`)
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained,
**kwargs)
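

if __name__ == '__main__':
    # Usage sketch (hypothetical task list): build a ResNet-18 with two
    # pixel-wise task decoders and push a dummy batch through it.
    model = resnet18_taskonomy(tasks=['normal', 'depth_zbuffer'])
    outputs = model(torch.randn(2, 3, 256, 256))
    print(sorted(outputs.keys()))  # ['depth_zbuffer', 'normal', 'rep']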
| 16,822
| 36.301552
| 154
|
py
|
taskgrouping
|
taskgrouping-master/model_definitions/__init__.py
|
from .xception_taskonomy_new import *
from .xception_taskonomy_joined_decoder import *
from .xception_taskonomy_small import *
from .resnet_taskonomy import *
| 160
| 25.833333
| 48
|
py
|
taskgrouping
|
taskgrouping-master/model_definitions/ozan_min_norm_solvers.py
|
import numpy as np
import torch
import math
class MinNormSolver:
MAX_ITER = 250
STOP_CRIT = 1e-5
    @staticmethod
    def _min_norm_element_from2(v1v1, v1v2, v2v2):
"""
Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2
        d is the distance (objective) optimized
v1v1 = <x1,x1>
v1v2 = <x1,x2>
v2v2 = <x2,x2>
"""
if v1v2 >= v1v1:
# Case: Fig 1, third column
gamma = 0.999
cost = v1v1
return gamma, cost
if v1v2 >= v2v2:
# Case: Fig 1, first column
gamma = 0.001
cost = v2v2
return gamma, cost
# Case: Fig 1, second column
gamma = -1.0 * ( (v1v2 - v2v2) / (v1v1+v2v2 - 2*v1v2) )
cost = v2v2 + gamma*(v1v2 - v2v2)
return gamma, cost
    @staticmethod
    def _min_norm_2d(vecs, dps):
"""
Find the minimum norm solution as combination of two points
This is correct only in 2D
        i.e. min_c |\sum c_i x_i|_2^2 s.t. \sum c_i = 1, 1 >= c_i >= 0 for all i, c_i + c_j = 1.0 for some i, j
"""
dmin = 1e99
sol=None
for i in range(len(vecs)):
for j in range(i+1,len(vecs)):
if (i,j) not in dps:
dps[(i, j)] = 0.0
for k in range(len(vecs[i])):
dps[(i,j)] += torch.dot(vecs[i][k], vecs[j][k]).item()#.data[0]
dps[(j, i)] = dps[(i, j)]
if (i,i) not in dps:
dps[(i, i)] = 0.0
for k in range(len(vecs[i])):
dps[(i,i)] += torch.dot(vecs[i][k], vecs[i][k]).item()#.data[0]
if (j,j) not in dps:
dps[(j, j)] = 0.0
for k in range(len(vecs[i])):
dps[(j, j)] += torch.dot(vecs[j][k], vecs[j][k]).item()#.data[0]
c,d = MinNormSolver._min_norm_element_from2(dps[(i,i)], dps[(i,j)], dps[(j,j)])
#print('c,d',c,d)
if d < dmin:
dmin = d
sol = [(i,j),c,d]
        if sol is None or math.isnan(sol[1]):
            raise ValueError('A numeric instability occurred in ozan_min_norm_solvers.')
return sol, dps
    @staticmethod
    def _projection2simplex(y):
"""
Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i
"""
m = len(y)
sorted_y = np.flip(np.sort(y), axis=0)
tmpsum = 0.0
tmax_f = (np.sum(y) - 1.0)/m
for i in range(m-1):
tmpsum+= sorted_y[i]
tmax = (tmpsum - 1)/ (i+1.0)
if tmax > sorted_y[i+1]:
tmax_f = tmax
break
return np.maximum(y - tmax_f, np.zeros(y.shape))
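    # Worked example (by hand): _projection2simplex(np.array([0.6, 0.6]))
    # gives tmax_f = (1.2 - 1) / 2 = 0.1 (the loop candidate -0.4 is not
    # greater than sorted_y[1] = 0.6), so the result is [0.5, 0.5] -- the
    # Euclidean projection of [0.6, 0.6] onto the probability simplex.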
    @staticmethod
    def _next_point(cur_val, grad, n):
proj_grad = grad - ( np.sum(grad) / n )
tm1 = -1.0*cur_val[proj_grad<0]/proj_grad[proj_grad<0]
tm2 = (1.0 - cur_val[proj_grad>0])/(proj_grad[proj_grad>0])
skippers = np.sum(tm1<1e-7) + np.sum(tm2<1e-7)
t = 1
if len(tm1[tm1>1e-7]) > 0:
t = np.min(tm1[tm1>1e-7])
if len(tm2[tm2>1e-7]) > 0:
t = min(t, np.min(tm2[tm2>1e-7]))
next_point = proj_grad*t + cur_val
next_point = MinNormSolver._projection2simplex(next_point)
return next_point
    @staticmethod
    def find_min_norm_element(vecs):
"""
Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})
Hence, we find the best 2-task solution, and then run the projected gradient descent until convergence
"""
# Solution lying at the combination of two points
dps = {}
init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps)
n=len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = 1 - init_sol[1]
if n < 3:
# This is optimal for n=2, so return the solution
return sol_vec , init_sol[2]
iter_count = 0
grad_mat = np.zeros((n,n))
for i in range(n):
for j in range(n):
grad_mat[i,j] = dps[(i, j)]
while iter_count < MinNormSolver.MAX_ITER:
grad_dir = -1.0*np.dot(grad_mat, sol_vec)
new_point = MinNormSolver._next_point(sol_vec, grad_dir, n)
# Re-compute the inner products for line search
v1v1 = 0.0
v1v2 = 0.0
v2v2 = 0.0
for i in range(n):
for j in range(n):
v1v1 += sol_vec[i]*sol_vec[j]*dps[(i,j)]
v1v2 += sol_vec[i]*new_point[j]*dps[(i,j)]
v2v2 += new_point[i]*new_point[j]*dps[(i,j)]
nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)
new_sol_vec = nc*sol_vec + (1-nc)*new_point
change = new_sol_vec - sol_vec
            if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT:
                return sol_vec, nd
            sol_vec = new_sol_vec
            iter_count += 1
        # Fallback: MAX_ITER reached without satisfying STOP_CRIT
        return sol_vec, nd
    @staticmethod
    def find_min_norm_element_FW(vecs):
"""
Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})
Hence, we find the best 2-task solution, and then run the Frank Wolfe until convergence
"""
# Solution lying at the combination of two points
dps = {}
init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps)
n=len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = 1 - init_sol[1]
if n < 3:
# This is optimal for n=2, so return the solution
return sol_vec , init_sol[2]
iter_count = 0
grad_mat = np.zeros((n,n))
for i in range(n):
for j in range(n):
grad_mat[i,j] = dps[(i, j)]
while iter_count < MinNormSolver.MAX_ITER:
t_iter = np.argmin(np.dot(grad_mat, sol_vec))
v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec))
v1v2 = np.dot(sol_vec, grad_mat[:, t_iter])
v2v2 = grad_mat[t_iter, t_iter]
nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)
new_sol_vec = nc*sol_vec
new_sol_vec[t_iter] += 1 - nc
change = new_sol_vec - sol_vec
            if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT:
                return sol_vec, nd
            sol_vec = new_sol_vec
            iter_count += 1
        # Fallback: MAX_ITER reached without satisfying STOP_CRIT
        return sol_vec, nd
    @staticmethod
    def gradient_normalizers(grads, losses, normalization_type):
gn = {}
if normalization_type == 'l2':
for t in grads:
            gn[t] = np.sqrt(np.sum([gr.pow(2).sum().item() for gr in grads[t]]))
elif normalization_type == 'loss':
for t in grads:
gn[t] = losses[t]
elif normalization_type == 'loss+':
for t in grads:
            gn[t] = losses[t] * np.sqrt(np.sum([gr.pow(2).sum().item() for gr in grads[t]]))
elif normalization_type == 'none':
for t in grads:
gn[t] = 1.0
else:
print('ERROR: Invalid Normalization Type')
return gn
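
# --- Minimal sanity-check sketch (not part of the original file) ---
# `vecs` is a list of per-task gradients, each itself a list of tensors
# (one per parameter group), matching how _min_norm_2d indexes vecs[i][k].
# The solver returns simplex weights minimizing |sum_i c_i * g_i|^2.
if __name__ == '__main__':
    torch.manual_seed(0)
    vecs = [[torch.randn(8)] for _ in range(3)]  # 3 tasks, one 8-d gradient each
    weights, cost = MinNormSolver.find_min_norm_element(vecs)
    print('weights:', weights, '(sum = %.3f)' % weights.sum(), 'min squared norm:', cost)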
| 7,628
| 36.214634
| 147
|
py
|
taskgrouping
|
taskgrouping-master/model_definitions/xception_taskonomy_joined_decoder.py
|
"""
Creates an Xception Model as defined in:
Francois Chollet
Xception: Deep Learning with Depthwise Separable Convolutions
https://arxiv.org/pdf/1610.02357.pdf
These weights were ported from the Keras implementation. They achieve the following performance on the validation set:
Loss:0.9173 Prec@1:78.892 Prec@5:94.292
REMEMBER to set your image size to 3x299x299 for both test and validation
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299
"""
import math
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch.nn import init
import torch
from .ozan_rep_fun import ozan_rep_function,trevor_rep_function,OzanRepFunction,TrevorRepFunction
__all__ = ['xception_taskonomy_joined_decoder','xception_taskonomy_joined_decoder_fifth','xception_taskonomy_joined_decoder_quad','xception_taskonomy_joined_decoder_half','xception_taskonomy_joined_decoder_80','xception_taskonomy_joined_decoder_ozan']
# model_urls = {
# 'xception_taskonomy':'file:///home/tstand/Dropbox/taskonomy/xception_taskonomy-a4b32ef7.pth.tar'
# }
class SeparableConv2d(nn.Module):
def __init__(self,in_channels,out_channels,kernel_size=1,stride=1,padding=0,dilation=1,bias=False,groupsize=1):
super(SeparableConv2d,self).__init__()
self.conv1 = nn.Conv2d(in_channels,in_channels,kernel_size,stride,padding,dilation,groups=max(1,in_channels//groupsize),bias=bias)
self.pointwise = nn.Conv2d(in_channels,out_channels,1,1,0,1,1,bias=bias)
#self.conv1=nn.Conv2d(in_channels,out_channels,kernel_size,stride,padding,dilation,bias=bias)
#self.pointwise=lambda x:x
def forward(self,x):
x = self.conv1(x)
x = self.pointwise(x)
return x
class Block(nn.Module):
def __init__(self,in_filters,out_filters,reps,strides=1,start_with_relu=True,grow_first=True):
super(Block, self).__init__()
if out_filters != in_filters or strides!=1:
self.skip = nn.Conv2d(in_filters,out_filters,1,stride=strides, bias=False)
self.skipbn = nn.BatchNorm2d(out_filters)
else:
self.skip=None
self.relu = nn.ReLU(inplace=True)
rep=[]
filters=in_filters
if grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=1,bias=False))
rep.append(nn.BatchNorm2d(out_filters))
filters = out_filters
for i in range(reps-1):
rep.append(self.relu)
rep.append(SeparableConv2d(filters,filters,3,stride=1,padding=1,bias=False))
rep.append(nn.BatchNorm2d(filters))
if not grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=1,bias=False))
rep.append(nn.BatchNorm2d(out_filters))
filters=out_filters
if not start_with_relu:
rep = rep[1:]
else:
rep[0] = nn.ReLU(inplace=False)
if strides != 1:
#rep.append(nn.AvgPool2d(3,strides,1))
rep.append(nn.Conv2d(filters,filters,2,2))
self.rep = nn.Sequential(*rep)
def forward(self,inp):
x = self.rep(inp)
if self.skip is not None:
skip = self.skip(inp)
skip = self.skipbn(skip)
else:
skip = inp
x+=skip
return x
class Encoder(nn.Module):
def __init__(self, sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]):
super(Encoder, self).__init__()
self.conv1 = nn.Conv2d(3, sizes[0], 3,2, 1, bias=False)
self.bn1 = nn.BatchNorm2d(sizes[0])
self.relu = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=False)
self.conv2 = nn.Conv2d(sizes[0],sizes[1],3,1,1,bias=False)
self.bn2 = nn.BatchNorm2d(sizes[1])
#do relu here
self.block1=Block(sizes[1],sizes[2],2,2,start_with_relu=False,grow_first=True)
self.block2=Block(sizes[2],sizes[3],2,2,start_with_relu=True,grow_first=True)
self.block3=Block(sizes[3],sizes[4],2,2,start_with_relu=True,grow_first=True)
self.block4=Block(sizes[4],sizes[5],3,1,start_with_relu=True,grow_first=True)
self.block5=Block(sizes[5],sizes[6],3,1,start_with_relu=True,grow_first=True)
self.block6=Block(sizes[6],sizes[7],3,1,start_with_relu=True,grow_first=True)
self.block7=Block(sizes[7],sizes[8],3,1,start_with_relu=True,grow_first=True)
self.block8=Block(sizes[8],sizes[9],3,1,start_with_relu=True,grow_first=True)
self.block9=Block(sizes[9],sizes[10],3,1,start_with_relu=True,grow_first=True)
self.block10=Block(sizes[10],sizes[11],3,1,start_with_relu=True,grow_first=True)
self.block11=Block(sizes[11],sizes[12],3,1,start_with_relu=True,grow_first=True)
#self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)
#self.conv3 = SeparableConv2d(768,512,3,1,1)
#self.bn3 = nn.BatchNorm2d(512)
#self.conv3 = SeparableConv2d(1024,1536,3,1,1)
#self.bn3 = nn.BatchNorm2d(1536)
#do relu here
#self.conv4 = SeparableConv2d(1536,2048,3,1,1)
#self.bn4 = nn.BatchNorm2d(2048)
def forward(self,input):
x = self.conv1(input)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
#x = self.block12(x)
#x = self.conv3(x)
#x = self.bn3(x)
#x = self.relu(x)
#x = self.conv4(x)
#x = self.bn4(x)
representation = self.relu2(x)
return representation
def interpolate(inp,size):
t = inp.type()
inp = inp.float()
out = nn.functional.interpolate(inp,size=size,mode='bilinear',align_corners=False)
if out.type()!=t:
out = out.half()
return out
class Decoder(nn.Module):
def __init__(self, output_channels=32,num_classes=None):
super(Decoder, self).__init__()
self.output_channels = output_channels
self.num_classes = num_classes
self.relu = nn.ReLU(inplace=True)
if num_classes is not None:
self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)
self.conv3 = SeparableConv2d(1024,1536,3,1,1)
self.bn3 = nn.BatchNorm2d(1536)
#do relu here
self.conv4 = SeparableConv2d(1536,2048,3,1,1)
self.bn4 = nn.BatchNorm2d(2048)
self.fc = nn.Linear(2048, num_classes)
else:
self.upconv1 = nn.ConvTranspose2d(512,128,2,2)
self.bn_upconv1 = nn.BatchNorm2d(128)
self.conv_decode1 = nn.Conv2d(128, 128, 3,padding=1)
self.bn_decode1 = nn.BatchNorm2d(128)
self.upconv2 = nn.ConvTranspose2d(128,64,2,2)
self.bn_upconv2 = nn.BatchNorm2d(64)
self.conv_decode2 = nn.Conv2d(64, 64, 3,padding=1)
self.bn_decode2 = nn.BatchNorm2d(64)
self.upconv3 = nn.ConvTranspose2d(64,48,2,2)
self.bn_upconv3 = nn.BatchNorm2d(48)
self.conv_decode3 = nn.Conv2d(48, 48, 3,padding=1)
self.bn_decode3 = nn.BatchNorm2d(48)
self.upconv4 = nn.ConvTranspose2d(48,32,2,2)
self.bn_upconv4 = nn.BatchNorm2d(32)
self.conv_decode4 = nn.Conv2d(32, output_channels, 3,padding=1)
def forward(self,representation):
if self.num_classes is None:
x = self.upconv1(representation)
x = self.bn_upconv1(x)
x = self.relu(x)
x = self.conv_decode1(x)
x = self.bn_decode1(x)
x = self.relu(x)
x = self.upconv2(x)
x = self.bn_upconv2(x)
x = self.relu(x)
x = self.conv_decode2(x)
x = self.bn_decode2(x)
x = self.relu(x)
x = self.upconv3(x)
x = self.bn_upconv3(x)
x = self.relu(x)
x = self.conv_decode3(x)
x = self.bn_decode3(x)
x = self.relu(x)
x = self.upconv4(x)
x = self.bn_upconv4(x)
x = self.relu(x)
x = self.conv_decode4(x)
else:
x = self.block12(representation)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu(x)
x = F.adaptive_avg_pool2d(x, (1, 1))
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class XceptionTaskonomy(nn.Module):
"""
Xception optimized for the ImageNet dataset, as specified in
https://arxiv.org/pdf/1610.02357.pdf
"""
def __init__(self,size=1, tasks=None,num_classes=None, ozan=False):
""" Constructor
Args:
num_classes: number of classes
"""
super(XceptionTaskonomy, self).__init__()
pre_rep_size=728
sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]
if size == 1:
sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]
elif size==.2:
sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]
elif size==.3:
sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]
elif size==.4:
sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]
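        # NOTE: the .2/.3/.4 branches above reuse the full-size channel widths;
        # compare xception_taskonomy_new.py, where size==.2 actually shrinks them.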
elif size==.5:
sizes=[24,48,96,192,512,512,512,512,512,512,512,512,512]
elif size==.8:
sizes=[32,64,128,248,648,648,648,648,648,648,648,648,648]
elif size==2:
sizes=[32,64, 128,256, 728, 728, 728, 728, 728, 728, 728, 728, 728]
elif size==4:
sizes=[64,128,256,512,1456,1456,1456,1456,1456,1456,1456,1456,1456]
self.encoder=Encoder(sizes=sizes)
pre_rep_size=sizes[-1]
self.tasks=tasks
self.ozan=ozan
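        # NOTE: unlike xception_taskonomy_new.py, forward() below never reads
        # self.ozan, so the *_ozan constructor variant has no effect here.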
self.task_to_decoder = {}
if tasks is not None:
self.final_conv = SeparableConv2d(pre_rep_size,512,3,1,1)
self.final_conv_bn = nn.BatchNorm2d(512)
output_channels=0
self.channels_per_task = {'segment_semantic':18,
'depth_zbuffer':1,
'normal':3,
'edge_occlusion':1,
'reshading':3,
'keypoints2d':1,
'edge_texture':1,
}
for task in tasks:
output_channels+=self.channels_per_task[task]
self.decoder=Decoder(output_channels)
else:
self.decoder=Decoder(output_channels=0,num_classes=1000)
#------- init weights --------
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
#-----------------------------
def forward(self, input):
rep = self.encoder(input)
if self.tasks is None:
return self.decoder(rep)
rep = self.final_conv(rep)
rep = self.final_conv_bn(rep)
outputs = {}
raw_output=self.decoder(rep)
range_start = 0
#print(raw_output.shape)
for task in self.tasks:
outputs[task]=raw_output[:,range_start:range_start+self.channels_per_task[task],:,:]
range_start+=self.channels_per_task[task]
return outputs
def xception_taskonomy_joined_decoder(**kwargs):
"""
Construct Xception.
"""
model = XceptionTaskonomy(**kwargs,size=1)
return model
def xception_taskonomy_joined_decoder_fifth(**kwargs):
"""
Construct Xception.
"""
model = XceptionTaskonomy(**kwargs,size=.2)
return model
def xception_taskonomy_joined_decoder_quad(**kwargs):
"""
Construct Xception.
"""
model = XceptionTaskonomy(**kwargs,size=4)
return model
def xception_taskonomy_joined_decoder_half(**kwargs):
"""
Construct Xception.
"""
model = XceptionTaskonomy(**kwargs,size=.5)
return model
def xception_taskonomy_joined_decoder_80(**kwargs):
"""
Construct Xception.
"""
model = XceptionTaskonomy(**kwargs,size=.8)
return model
def xception_taskonomy_joined_decoder_ozan(**kwargs):
"""
Construct Xception.
"""
model = XceptionTaskonomy(ozan=True,**kwargs)
return model
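
# --- Minimal usage sketch (not part of the original file) ---
# The joined-decoder variant predicts all tasks through a single decoder and
# slices its output channels per task (see channels_per_task above). With a
# 256x256 input the encoder downsamples by 16x and the decoder upsamples back,
# so each task map matches the input resolution.
if __name__ == '__main__':
    model = xception_taskonomy_joined_decoder(tasks=['depth_zbuffer', 'normal'])
    out = model(torch.randn(1, 3, 256, 256))
    print(out['depth_zbuffer'].shape)  # torch.Size([1, 1, 256, 256])
    print(out['normal'].shape)         # torch.Size([1, 3, 256, 256])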
| 13,355
| 31.183133
| 251
|
py
|
taskgrouping
|
taskgrouping-master/model_definitions/xception_taskonomy_new.py
|
"""
"""
import math
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch.nn import init
import torch
from .ozan_rep_fun import ozan_rep_function,trevor_rep_function,OzanRepFunction,TrevorRepFunction
__all__ = ['xception_taskonomy_new','xception_taskonomy_new_fifth','xception_taskonomy_new_quad','xception_taskonomy_new_half','xception_taskonomy_new_80','xception_taskonomy_ozan']
# model_urls = {
# 'xception_taskonomy':'file:///home/tstand/Dropbox/taskonomy/xception_taskonomy-a4b32ef7.pth.tar'
# }
class SeparableConv2d(nn.Module):
def __init__(self,in_channels,out_channels,kernel_size=1,stride=1,padding=0,dilation=1,bias=False,groupsize=1):
super(SeparableConv2d,self).__init__()
self.conv1 = nn.Conv2d(in_channels,in_channels,kernel_size,stride,padding,dilation,groups=max(1,in_channels//groupsize),bias=bias)
self.pointwise = nn.Conv2d(in_channels,out_channels,1,1,0,1,1,bias=bias)
#self.conv1=nn.Conv2d(in_channels,out_channels,kernel_size,stride,padding,dilation,bias=bias)
#self.pointwise=lambda x:x
def forward(self,x):
x = self.conv1(x)
x = self.pointwise(x)
return x
class Block(nn.Module):
def __init__(self,in_filters,out_filters,reps,strides=1,start_with_relu=True,grow_first=True):
super(Block, self).__init__()
if out_filters != in_filters or strides!=1:
self.skip = nn.Conv2d(in_filters,out_filters,1,stride=strides, bias=False)
self.skipbn = nn.BatchNorm2d(out_filters)
else:
self.skip=None
self.relu = nn.ReLU(inplace=True)
rep=[]
filters=in_filters
if grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=1,bias=False))
rep.append(nn.BatchNorm2d(out_filters))
filters = out_filters
for i in range(reps-1):
rep.append(self.relu)
rep.append(SeparableConv2d(filters,filters,3,stride=1,padding=1,bias=False))
rep.append(nn.BatchNorm2d(filters))
if not grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=1,bias=False))
rep.append(nn.BatchNorm2d(out_filters))
filters=out_filters
if not start_with_relu:
rep = rep[1:]
else:
rep[0] = nn.ReLU(inplace=False)
if strides != 1:
#rep.append(nn.AvgPool2d(3,strides,1))
rep.append(nn.Conv2d(filters,filters,2,2))
self.rep = nn.Sequential(*rep)
def forward(self,inp):
x = self.rep(inp)
if self.skip is not None:
skip = self.skip(inp)
skip = self.skipbn(skip)
else:
skip = inp
x+=skip
return x
class Encoder(nn.Module):
def __init__(self, sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]):
super(Encoder, self).__init__()
self.conv1 = nn.Conv2d(3, sizes[0], 3,2, 1, bias=False)
self.bn1 = nn.BatchNorm2d(sizes[0])
self.relu = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=False)
self.conv2 = nn.Conv2d(sizes[0],sizes[1],3,1,1,bias=False)
self.bn2 = nn.BatchNorm2d(sizes[1])
#do relu here
self.block1=Block(sizes[1],sizes[2],2,2,start_with_relu=False,grow_first=True)
self.block2=Block(sizes[2],sizes[3],2,2,start_with_relu=True,grow_first=True)
self.block3=Block(sizes[3],sizes[4],2,2,start_with_relu=True,grow_first=True)
self.block4=Block(sizes[4],sizes[5],3,1,start_with_relu=True,grow_first=True)
self.block5=Block(sizes[5],sizes[6],3,1,start_with_relu=True,grow_first=True)
self.block6=Block(sizes[6],sizes[7],3,1,start_with_relu=True,grow_first=True)
self.block7=Block(sizes[7],sizes[8],3,1,start_with_relu=True,grow_first=True)
self.block8=Block(sizes[8],sizes[9],3,1,start_with_relu=True,grow_first=True)
self.block9=Block(sizes[9],sizes[10],3,1,start_with_relu=True,grow_first=True)
self.block10=Block(sizes[10],sizes[11],3,1,start_with_relu=True,grow_first=True)
self.block11=Block(sizes[11],sizes[12],3,1,start_with_relu=True,grow_first=True)
#self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)
#self.conv3 = SeparableConv2d(768,512,3,1,1)
#self.bn3 = nn.BatchNorm2d(512)
#self.conv3 = SeparableConv2d(1024,1536,3,1,1)
#self.bn3 = nn.BatchNorm2d(1536)
#do relu here
#self.conv4 = SeparableConv2d(1536,2048,3,1,1)
#self.bn4 = nn.BatchNorm2d(2048)
def forward(self,input):
x = self.conv1(input)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
#x = self.block12(x)
#x = self.conv3(x)
#x = self.bn3(x)
#x = self.relu(x)
#x = self.conv4(x)
#x = self.bn4(x)
representation = self.relu2(x)
return representation
def interpolate(inp,size):
t = inp.type()
inp = inp.float()
out = nn.functional.interpolate(inp,size=size,mode='bilinear',align_corners=False)
if out.type()!=t:
out = out.half()
return out
class Decoder(nn.Module):
def __init__(self, output_channels=32,num_classes=None,half_sized_output=False,small_decoder=True):
super(Decoder, self).__init__()
self.output_channels = output_channels
self.num_classes = num_classes
self.half_sized_output=half_sized_output
self.relu = nn.ReLU(inplace=True)
if num_classes is not None:
self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)
self.conv3 = SeparableConv2d(1024,1536,3,1,1)
self.bn3 = nn.BatchNorm2d(1536)
#do relu here
self.conv4 = SeparableConv2d(1536,2048,3,1,1)
self.bn4 = nn.BatchNorm2d(2048)
self.fc = nn.Linear(2048, num_classes)
else:
if small_decoder:
self.upconv1 = nn.ConvTranspose2d(512,128,2,2)
self.bn_upconv1 = nn.BatchNorm2d(128)
self.conv_decode1 = nn.Conv2d(128, 128, 3,padding=1)
self.bn_decode1 = nn.BatchNorm2d(128)
self.upconv2 = nn.ConvTranspose2d(128,64,2,2)
self.bn_upconv2 = nn.BatchNorm2d(64)
self.conv_decode2 = nn.Conv2d(64, 64, 3,padding=1)
self.bn_decode2 = nn.BatchNorm2d(64)
self.upconv3 = nn.ConvTranspose2d(64,48,2,2)
self.bn_upconv3 = nn.BatchNorm2d(48)
self.conv_decode3 = nn.Conv2d(48, 48, 3,padding=1)
self.bn_decode3 = nn.BatchNorm2d(48)
if half_sized_output:
self.upconv4 = nn.Identity()
self.bn_upconv4 = nn.Identity()
self.conv_decode4 = nn.Conv2d(48, output_channels, 3,padding=1)
else:
self.upconv4 = nn.ConvTranspose2d(48,32,2,2)
self.bn_upconv4 = nn.BatchNorm2d(32)
self.conv_decode4 = nn.Conv2d(32, output_channels, 3,padding=1)
else:
self.upconv1 = nn.ConvTranspose2d(512,256,2,2)
self.bn_upconv1 = nn.BatchNorm2d(256)
self.conv_decode1 = nn.Conv2d(256, 256, 3,padding=1)
self.bn_decode1 = nn.BatchNorm2d(256)
self.upconv2 = nn.ConvTranspose2d(256,128,2,2)
self.bn_upconv2 = nn.BatchNorm2d(128)
self.conv_decode2 = nn.Conv2d(128, 128, 3,padding=1)
self.bn_decode2 = nn.BatchNorm2d(128)
self.upconv3 = nn.ConvTranspose2d(128,96,2,2)
self.bn_upconv3 = nn.BatchNorm2d(96)
self.conv_decode3 = nn.Conv2d(96, 96, 3,padding=1)
self.bn_decode3 = nn.BatchNorm2d(96)
if half_sized_output:
self.upconv4 = nn.Identity()
self.bn_upconv4 = nn.Identity()
self.conv_decode4 = nn.Conv2d(96, output_channels, 3,padding=1)
else:
self.upconv4 = nn.ConvTranspose2d(96,64,2,2)
self.bn_upconv4 = nn.BatchNorm2d(64)
self.conv_decode4 = nn.Conv2d(64, output_channels, 3,padding=1)
def forward(self,representation):
if self.num_classes is None:
x = self.upconv1(representation)
x = self.bn_upconv1(x)
x = self.relu(x)
x = self.conv_decode1(x)
x = self.bn_decode1(x)
x = self.relu(x)
x = self.upconv2(x)
x = self.bn_upconv2(x)
x = self.relu(x)
x = self.conv_decode2(x)
x = self.bn_decode2(x)
x = self.relu(x)
x = self.upconv3(x)
x = self.bn_upconv3(x)
x = self.relu(x)
x = self.conv_decode3(x)
x = self.bn_decode3(x)
x = self.relu(x)
if not self.half_sized_output:
x = self.upconv4(x)
x = self.bn_upconv4(x)
x = self.relu(x)
x = self.conv_decode4(x)
else:
x = self.block12(representation)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu(x)
x = F.adaptive_avg_pool2d(x, (1, 1))
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class XceptionTaskonomy(nn.Module):
"""
Xception optimized for the ImageNet dataset, as specified in
https://arxiv.org/pdf/1610.02357.pdf
"""
def __init__(self,size=1, tasks=None,num_classes=None, ozan=False,half_sized_output=False):
""" Constructor
Args:
num_classes: number of classes
"""
super(XceptionTaskonomy, self).__init__()
pre_rep_size=728
sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]
if size == 1:
sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]
elif size==.2:
sizes=[16,32,64,256,320,320,320,320,320,320,320,320,320]
elif size==.3:
sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]
elif size==.4:
sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]
elif size==.5:
sizes=[24,48,96,192,512,512,512,512,512,512,512,512,512]
elif size==.8:
sizes=[32,64,128,248,648,648,648,648,648,648,648,648,648]
elif size==2:
sizes=[32,64, 128,256, 728, 728, 728, 728, 728, 728, 728, 728, 728]
elif size==4:
sizes=[64,128,256,512,1456,1456,1456,1456,1456,1456,1456,1456,1456]
self.encoder=Encoder(sizes=sizes)
pre_rep_size=sizes[-1]
self.tasks=tasks
self.ozan=ozan
self.task_to_decoder = {}
if tasks is not None:
self.final_conv = SeparableConv2d(pre_rep_size,512,3,1,1)
self.final_conv_bn = nn.BatchNorm2d(512)
            # One decoder per task; the channel counts below preserve the
            # original per-task if-chain.
            channels_per_task = {'segment_semantic': 18,
                                 'depth_zbuffer': 1,
                                 'normal': 3,
                                 'edge_occlusion': 1,
                                 'keypoints2d': 1,
                                 'edge_texture': 1,
                                 'reshading': 1,
                                 'rgb': 3,
                                 'principal_curvature': 2,
                                 }
            for task in tasks:
                decoder = Decoder(channels_per_task[task], half_sized_output=half_sized_output)
                self.task_to_decoder[task] = decoder
else:
self.task_to_decoder['classification']=Decoder(output_channels=0,num_classes=1000)
self.decoders = nn.ModuleList(self.task_to_decoder.values())
#------- init weights --------
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
#-----------------------------
def forward(self, input):
rep = self.encoder(input)
if self.tasks is None:
return self.decoders[0](rep)
rep = self.final_conv(rep)
rep = self.final_conv_bn(rep)
outputs={'rep':rep}
if self.ozan:
OzanRepFunction.n=len(self.decoders)
rep = ozan_rep_function(rep)
for i,(task,decoder) in enumerate(zip(self.task_to_decoder.keys(),self.decoders)):
outputs[task]=decoder(rep[i])
else:
TrevorRepFunction.n=len(self.decoders)
rep = trevor_rep_function(rep)
for i,(task,decoder) in enumerate(zip(self.task_to_decoder.keys(),self.decoders)):
outputs[task]=decoder(rep)
return outputs
def xception_taskonomy_new(**kwargs):
"""
Construct Xception.
"""
model = XceptionTaskonomy(**kwargs,size=1)
return model
def xception_taskonomy_new_fifth(**kwargs):
"""
Construct Xception.
"""
model = XceptionTaskonomy(**kwargs,size=.2)
return model
def xception_taskonomy_new_quad(**kwargs):
"""
Construct Xception.
"""
model = XceptionTaskonomy(**kwargs,size=4)
return model
def xception_taskonomy_new_half(**kwargs):
"""
Construct Xception.
"""
model = XceptionTaskonomy(**kwargs,size=.5)
return model
def xception_taskonomy_new_80(**kwargs):
"""
Construct Xception.
"""
model = XceptionTaskonomy(**kwargs,size=.8)
return model
def xception_taskonomy_ozan(**kwargs):
"""
Construct Xception.
"""
model = XceptionTaskonomy(ozan=True,**kwargs)
return model
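
# --- Minimal usage sketch (not part of the original file) ---
# One decoder per task over a shared representation. Hedged assumption: the
# trevor/ozan rep functions from .ozan_rep_fun act as identities in the
# forward pass (they exist to rescale gradients on the backward pass).
if __name__ == '__main__':
    model = xception_taskonomy_new(tasks=['depth_zbuffer', 'normal'])
    out = model(torch.randn(1, 3, 256, 256))  # keys: 'rep' plus one per task
    print(out['depth_zbuffer'].shape)          # torch.Size([1, 1, 256, 256])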
| 14,950
| 32.979545
| 181
|
py
|
vae_lesion_deficit
|
vae_lesion_deficit-main/utils.py
|
import numpy as np
import random
from torch.utils.data import Dataset, DataLoader
from monai.transforms import *
def resize(volume, target_size):
resize_transform = Compose([Resize((target_size[0],
target_size[1],
target_size[2]))])
if len(volume.shape) == 3:
volume = np.expand_dims(volume, axis=0)
resized_volume = resize_transform(volume)
resized_volume = np.squeeze(resized_volume)
return resized_volume
class DeficitDataset(Dataset):
def __init__(self, data, labels):
self.data = data
self.labels = labels
def __len__(self):
return len(self.data)
def __getitem__(self, index):
img = self.data[index]
return img, np.expand_dims(self.labels[index], axis=0)
def create_train_val_cal_loaders(images, labels, batch_size, continuous=False, seed=42):
    '''
    Create the training, validation and calibration loaders from input lesions and their labels.
    If the labels are continuous, pass continuous=True so they are rescaled to [0, 1] and z-scored;
    otherwise they are left as-is.
    :param images:
    :param labels:
    :param batch_size:
    :param continuous:
    :param seed:
    :return:
    '''
    # Currently chooses training, validation and calibration sets randomly.
    # You will most likely need proper (e.g. stratified) sampling to ensure decent positive/negative ratios.
    # This is data dependent though, so you have to choose.
np.random.seed(seed)
random.seed(seed)
# Shuffle data randomly -- REMEMBER! BE SMART WITH YOUR SAMPLING OF TRAIN, VAL, CAL THIS IS JUST AN EXAMPLE
indices = [i for i in range(len(images))]
np.random.shuffle(indices)
images = images[indices]
labels = labels[indices]
if continuous:
# Put them in the 0-1 range -- OPTIONAL but recommended
labels = labels - labels.min()
labels = labels / labels.max()
# Gotta z-score normalise the labels
labels = (labels - labels.mean()) / labels.std()
# 90/5/5 split - no test set, this is inference
train_l = int(0.9 * len(images))
val_l = int(0.05 * len(images))
cal_l = int(0.05 * len(images))
train_data = images[:train_l]
train_labels = labels[:train_l]
val_data = images[train_l:(train_l+val_l)]
val_labels = labels[train_l:(train_l+val_l)]
cal_data = images[(train_l + val_l):]
cal_labels = labels[(train_l + val_l):]
    '''
    num_workers=0 because all the images and labels fit into RAM for these DataLoaders.
    Lesion-deficit datasets are usually small, so this should be possible.
    '''
dataset = DeficitDataset(data=train_data, labels=train_labels)
train_loader = DataLoader(dataset, batch_size=batch_size, drop_last=False,
shuffle=True, num_workers=0, pin_memory=True)
val_dataset = DeficitDataset(data=val_data, labels=val_labels)
val_loader = DataLoader(val_dataset, batch_size=batch_size, drop_last=False,
shuffle=True, num_workers=0, pin_memory=True)
cal_dataset = DeficitDataset(data=cal_data, labels=cal_labels)
cal_loader = DataLoader(cal_dataset, batch_size=batch_size, drop_last=False,
shuffle=True, num_workers=0, pin_memory=True)
return train_loader, val_loader, cal_loader
def viz_functional_parcellation(mask, grid_acc, v_idx, v, template_brain, cmap='autumn',
vmax=None, vmin=None):
if v_idx == 0:
template_slice = template_brain[:, :, v]
template_slice = np.pad(template_slice, ((6, 6), (0, 0)),
constant_values=0)
elif v_idx == 1:
template_slice = template_brain[v, :, :]
template_slice = np.pad(template_slice, ((0, 0), (6, 6)),
constant_values=0)
else:
template_slice = template_brain[:, v, :]
template_slice = np.pad(template_slice, ((6, 6), (6, 6)),
constant_values=0)
template_slice = np.rot90(template_slice, k=1)
grid_acc.imshow(template_slice, cmap='gray')
if v_idx == 0:
mask = mask[:, :, v]
mask = np.pad(mask, ((6, 6), (0, 0)), constant_values=0)
mask = np.rot90(mask, k=1)
elif v_idx == 1:
mask = mask[v, :, :]
mask = np.pad(mask, ((0, 0), (6, 6)), constant_values=0)
mask = np.rot90(mask, k=1)
else:
mask = mask[:, v, :]
mask = np.pad(mask, ((6, 6), (6, 6)), constant_values=0)
mask = np.rot90(mask, k=1)
if cmap != 'jet':
mask = (mask > 0).astype(np.uint8)
mask = np.ma.masked_where(mask == 0, mask)
if not vmax:
grid_acc.imshow(mask, cmap=cmap, alpha=0.6)
else:
grid_acc.imshow(mask, cmap=cmap, alpha=0.6, vmin=vmin, vmax=vmax)
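
# --- Minimal usage sketch (not part of the original file) ---
# Builds the three loaders from random stand-in data, mirroring train.py.
# `imgs` and `labs` are placeholder names for your lesion volumes and labels.
if __name__ == '__main__':
    imgs = np.random.uniform(0, 1, (100, 32, 32, 32))
    labs = np.random.uniform(0, 1, (100, 1))
    tr, va, ca = create_train_val_cal_loaders(imgs, labs, batch_size=16, continuous=True)
    x, y = next(iter(tr))
    print(x.shape, y.shape)  # e.g. torch.Size([16, 32, 32, 32]) torch.Size([16, 1, 1])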
| 4,837
| 35.104478
| 111
|
py
|
vae_lesion_deficit
|
vae_lesion_deficit-main/model.py
|
import math
import torch
import torch.nn as nn
import torch.distributions as D
import torch.nn.functional as F
# Define two globals
bce_fn = nn.BCELoss(reduction='none')
Tensor = torch.cuda.FloatTensor
def add_coords(x, just_coords=False):
'''
This just the Uber CoordConv method extended to 3D. Definitely use it on the input
Using it on other layers of the model can be helpful, but it slows down training
:param x:
:param just_coords:
:return:
'''
batch_size_shape, channel_in_shape, dim_z, dim_y, dim_x = x.shape
xx_ones = torch.ones([1, 1, 1, 1, dim_x])
yy_ones = torch.ones([1, 1, 1, 1, dim_y])
zz_ones = torch.ones([1, 1, 1, 1, dim_z])
xy_range = torch.arange(dim_y).float()
xy_range = xy_range[None, None, None, :, None]
yz_range = torch.arange(dim_z).float()
yz_range = yz_range[None, None, None, :, None]
zx_range = torch.arange(dim_x).float()
zx_range = zx_range[None, None, None, :, None]
xy_channel = torch.matmul(xy_range, xx_ones)
xx_channel = torch.cat([xy_channel + i for i in range(dim_z)], dim=2)
xx_channel = xx_channel.repeat(batch_size_shape, 1, 1, 1, 1)
yz_channel = torch.matmul(yz_range, yy_ones)
yz_channel = yz_channel.permute(0, 1, 3, 4, 2)
yy_channel = torch.cat([yz_channel + i for i in range(dim_x)], dim=4)
yy_channel = yy_channel.repeat(batch_size_shape, 1, 1, 1, 1)
zx_channel = torch.matmul(zx_range, zz_ones)
zx_channel = zx_channel.permute(0, 1, 4, 2, 3)
zz_channel = torch.cat([zx_channel + i for i in range(dim_y)], dim=3)
zz_channel = zz_channel.repeat(batch_size_shape, 1, 1, 1, 1)
xx_channel = xx_channel.to(x.device)
yy_channel = yy_channel.to(x.device)
zz_channel = zz_channel.to(x.device)
xx_channel = xx_channel.float() / (dim_x - 1)
yy_channel = yy_channel.float() / (dim_y - 1)
zz_channel = zz_channel.float() / (dim_z - 1)
if just_coords:
out = torch.cat([xx_channel, yy_channel, zz_channel], dim=1)
else:
out = torch.cat([x, xx_channel, yy_channel, zz_channel], dim=1)
return out
class SBlock(nn.Module):
def __init__(self, in_planes, planes, downsample=False, ks=3, stride=1, upsample=False, add_coords=False):
'''
This is the Convolutional block that constitutes the meat of the Encoder and Decoder
:param in_planes:
:param planes:
:param downsample:
:param ks:
:param stride:
:param upsample:
:param add_coords:
'''
super(SBlock, self).__init__()
self.downsample = downsample
self.upsample = upsample
if ks == 3:
pad = 1
elif ks == 5:
pad = 2
else:
pad = 3
if add_coords:
in_planes += 3
self.add_coords = add_coords
self.c1 = nn.Sequential(nn.Conv3d(in_planes, planes, kernel_size=ks, stride=stride,
padding=pad),
nn.BatchNorm3d(planes),
nn.GELU())
self.upsample_layer = nn.Upsample(scale_factor=2, mode='nearest')
def forward(self, x):
if self.add_coords:
x = add_coords(x)
out = self.c1(x)
if self.downsample:
out = F.avg_pool3d(out, kernel_size=2, stride=2)
if self.upsample:
out = self.upsample_layer(out)
return out
class VAE(nn.Module):
def __init__(self, input_size, sd=16, z_dim=20, out_chans=1, in_chans=1):
'''
This is the VAE model that does the lesion deficit mapping inference. It does two tasks with a single latent.
First it produces the lesion-deficit map. Second it produces a reconstruction of the lesions.
Both of these are necessary because we are modelling the joint distribution P(X,Y)
There are many architectural improvements that will probably help get better accuracy, but this is a simple
architecture that works even with little data. The more data you have, the more you might want to replace
the Encoder and Decoder with something more complicated. Or even use a VDVAE
        Adding coordinates helps as well, but by default the model doesn't add them
:param input_size:
:param sd:
:param z_dim:
:param out_chans:
:param in_chans:
'''
super(VAE, self).__init__()
self.sd = sd
self.z_dim = z_dim
self.half_z = z_dim // 2
        # Each layer downsamples by a factor of 2; this is how many layers are needed to reach a 2**3 latent grid
self.num_layers = int(math.log2(input_size)) - 1
'''
Encoder -- You'll probably need to tweak this to get the best results, GPU memory usage, etc.
'''
self.encoder_layers = nn.ModuleList()
enc_sd = self.sd
for l in range(self.num_layers):
self.encoder_layers.append(SBlock(in_chans, enc_sd, downsample=True))
in_chans = enc_sd
if l < self.num_layers - 1:
enc_sd *= 2
# These are the dimensions of a fully connected latent at the end of the encoder
# TODO: might not need to always be 2 cubed
self.spatial_dims = input_size // (2 ** self.num_layers)
self.dense_dims = self.spatial_dims ** 3 * (enc_sd)
'''
Parameters of the latent space
'''
self.mu = nn.Linear(self.dense_dims, z_dim)
self.logvar = nn.Linear(self.dense_dims, z_dim)
'''
Decoders for the inference maps and lesion reconstructions
'''
self.decoder_inference = nn.ModuleList()
self.decoder_reconstruction = nn.ModuleList()
self.decoder_inference.append(nn.Sequential(nn.Linear(self.half_z, self.dense_dims),
nn.GELU()))
self.decoder_reconstruction.append(nn.Sequential(nn.Linear(self.half_z, self.dense_dims),
nn.GELU()))
dec_sd = enc_sd
for l in range(self.num_layers):
self.decoder_inference.append(SBlock(dec_sd, dec_sd // 2, upsample=True))
self.decoder_reconstruction.append(SBlock(dec_sd, dec_sd // 2, upsample=True))
dec_sd = dec_sd // 2
# Finish both decoders
self.decoder_inference.append(
nn.Sequential(nn.Conv3d(dec_sd, int(dec_sd / 2), kernel_size=3, stride=1, padding=1),
nn.GELU(),
nn.Conv3d(int(dec_sd / 2), out_chans, kernel_size=1, stride=1, padding=0)
)
)
self.decoder_reconstruction.append(
nn.Sequential(nn.Conv3d(dec_sd, int(dec_sd / 2), kernel_size=3, stride=1, padding=1),
nn.GELU(),
nn.Conv3d(int(dec_sd / 2), 1, kernel_size=1, stride=1, padding=0)
)
)
def sampling(self, mu, log_var):
'''
Sample your latent from z ~ N(mean, scale)
:param mu:
:param log_var:
:return:
'''
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
def encoder(self, x):
for enc_layer in self.encoder_layers:
x = enc_layer(x)
x = x.view(-1, self.dense_dims)
return self.mu(x), self.logvar(x)
def decoder(self, x):
x = self.decoder_inference[0](x)
x = x.view(x.size(0), -1, self.spatial_dims, self.spatial_dims, self.spatial_dims)
for dec_layer in self.decoder_inference[1:]:
x = dec_layer(x)
return x
def rdecoder(self, x):
x = self.decoder_reconstruction[0](x)
x = x.view(x.size(0), -1, self.spatial_dims, self.spatial_dims, self.spatial_dims)
for dec_layer in self.decoder_reconstruction[1:]:
x = dec_layer(x)
return x
def forward(self, x, y):
mu, log_var = self.encoder(x)
z = self.sampling(mu, log_var)
mask_z = z[:, :self.half_z]
recon_z = z[:, self.half_z:]
kl = torch.sum(0.5 * (-log_var + torch.exp(log_var) + mu ** 2 - 1), dim=1)
return self.decoder(mask_z), self.rdecoder(recon_z), kl
class ModelWrapper(nn.Module):
def __init__(self, input_size, z_dim=128, start_dims=16, continuous=False):
'''
A model wrapper around the VAE
:param input_size:
:param z_dim:
:param start_dims:
:param continuous:
'''
super().__init__()
self.z_dim = z_dim
self.start_dims = start_dims
# 5 input channels - X, the coordinates, and Y
# 2 output channels - The mean and the variance of the inference maps
self.mask_model = VAE(input_size,
sd=start_dims,
z_dim=z_dim,
out_chans=2,
in_chans=5)
self.continuous = continuous
print(f'CONTINUOUS MODEL: {self.continuous}')
def forward(self, x, y, val=False, provided_mask=None, provided_scale=None, t=0.5, calibrate=False, lesion_threshold=None):
'''
If doing validation you will want to use the generated inference map to gauge the accuracy of the
predictions
:param x:
:param y:
:param val:
:param provided_mask:
:param provided_scale:
:param t:
:param calibrate:
:return:
'''
b, c, h, w, d = x.shape
# Add coordinates to the lesion
coord_x = add_coords(x)
# Add the label as a volume
my = y.view(-1, 1, 1, 1, 1).repeat(1, 1, h, w, d)
coord_x = torch.cat([coord_x, my], dim=1)
if val:
# If doing validation use the masks calculated from the training data
# Do a forward pass still so we can evaluate reconstruction quality and KL
masks, recons, kl_m = self.mask_model(coord_x, y)
preds_mean = provided_mask
preds_scale = provided_scale
else:
masks, recons, kl_m = self.mask_model(coord_x, y)
preds_mean = masks[:, 0].view(-1, 1, h, w, d)
preds_scale = masks[:, 1].view(-1, 1, h, w, d)
if calibrate:
# If calibrating predictions, we want to find a thresholding quantile that achieves the best accuracy!
flat_preds_a = preds_mean.view(x.size(0), -1)
qt = torch.quantile(flat_preds_a, t, dim=1).view(-1, 1, 1, 1, 1)
preds_mean = (preds_mean > qt) * preds_mean
# The three outputs of our network -> Reconstructed lesion, Mean inference map and STD variance map
recons = torch.sigmoid(recons)
# If a lesion threshold is provided, binarise the reconstruction according to this and calculate the predictive loss with it
# rather than with the original lesion
if lesion_threshold:
bin_lesion = (recons > lesion_threshold)
logits = torch.mean(bin_lesion * preds_mean, dim=(-4, -3, -2, -1)).view(-1, 1)
                # preds_scale holds log-std values; exponentiating yields a strictly positive scale
scale = torch.mean(bin_lesion * preds_scale, dim=(-4, -3, -2, -1)).view(-1, 1).exp()
else:
logits = torch.mean(x * preds_mean, dim=(-4, -3, -2, -1)).view(-1, 1)
            # preds_scale holds log-std values; exponentiating yields a strictly positive scale
scale = torch.mean(x * preds_scale, dim=(-4, -3, -2, -1)).view(-1, 1).exp()
'''
Calculate log P(Y|X,M), i.e. the log-likelihood of our inference objective
'''
if self.continuous:
# mask_ll = - D.Normal(logits, scale + 1e-5).log_prob(y).mean()
mask_ll = torch.mean((logits - y) ** 2)
else:
# Don't use STD on binary case because Bernoulli has no variance -> Beta distributions work well
probabilities = torch.sigmoid(logits)
mask_ll = bce_fn(probabilities, y).mean()
'''
Calculate log P(X|M), i.e. the log likelihood of our lesions
'''
recon_ll = torch.sum(bce_fn(recons, x), dim=(-3, -2, -1)).mean()
preds = torch.mean(preds_mean, dim=0).view(1, 1, h, w, d)
mask_scale = torch.mean(preds_scale, dim=0).view(1, 1, h, w, d)
# Calculate the accuracy of the predictions. If it is continuous, this is just MSE
if self.continuous:
acc = mask_ll
else:
quant_preds = (probabilities > 0.5).to(torch.float32)
acc = torch.mean(torch.eq(quant_preds, y).float())
'''
The final loss is log P(Y| X, M) + log P(X|M) + D_KL[Q(M|X,Y) || P(M)]
'''
loss = mask_ll + recon_ll + kl_m.mean()
ret_dict = dict(mean_mask=preds,
mask_scale=mask_scale,
mask_ll=mask_ll.mean(),
kl=kl_m.mean(),
loss=loss, acc=acc,
recon_ll=recon_ll.mean()
)
return ret_dict
def sample_masks(self, num_samples=400):
        '''
        Use this to sample the mean and STD masks from the latent space
        :param num_samples:
        :return:
        '''
        z = torch.randn(num_samples, self.z_dim // 2).type(Tensor)  # the inference decoder consumes only the mask half of the latent
preds = self.mask_model.decoder(z)
        mean_mask = torch.mean(preds[:, 0], dim=0)  # average over samples only, keeping the full 3-D map
        scale_mask = torch.mean(preds[:, 1], dim=0)
return mean_mask, scale_mask
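
# --- Minimal CPU smoke test (not part of the original file) ---
# Runs one forward pass of the wrapper on random data; the 5 input channels
# are lesion + 3 CoordConv coordinates + label volume, as described above.
if __name__ == '__main__':
    wrapper = ModelWrapper(input_size=32, z_dim=128, start_dims=16, continuous=True)
    x = torch.rand(2, 1, 32, 32, 32)  # lesion volumes in [0, 1]
    y = torch.rand(2, 1)              # continuous deficit scores
    ret = wrapper(x, y)
    print(ret['loss'].item(), ret['mean_mask'].shape)  # mean_mask: (1, 1, 32, 32, 32)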
| 13,659
| 36.322404
| 132
|
py
|
vae_lesion_deficit
|
vae_lesion_deficit-main/train.py
|
import numpy as np
import os
import argparse
import torch
import torch.optim as optim
import datetime
import torch as tc
from model import ModelWrapper
from utils import create_train_val_cal_loaders
Tensor = torch.cuda.FloatTensor
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def train_vdvae(config, images, labels):
device = torch.device(f"cuda:0")
best_epoch = 0
# These are the directories to store trained models and vae masks
if not os.path.exists('pretrained'):
os.makedirs('pretrained')
if not os.path.exists('vae_masks'):
os.makedirs('vae_masks')
# Get the time stamp
ft = "%Y_%m_%d_%H_%M_%S"
timestamp = datetime.datetime.now().strftime(ft)
model = ModelWrapper(config['input_size'],
z_dim=config['z_dim'],
start_dims=config['start_dims'],
continuous=config['continuous']).to(device)
num_epochs = config['epochs']
train_loader, val_loader, cal_loader = create_train_val_cal_loaders(images, labels,
batch_size=config['batch_size'],
continuous=config['continuous'])
# Other optimisers work as well, Adamax is quite stable though
optimizer = optim.Adamax(model.parameters(), weight_decay=config['wd'], lr=config['lr'])
print('NUM PARAMS: {}'.format(count_parameters(model)))
print(f'NUM EPOCHS: {num_epochs}')
best_loss = 1e30
best_acc = 0
best_lk = 1e30
global_step = 0
for epoch in range(num_epochs):
model.zero_grad()
train_acc = 0
# The trackers for the mean and scale of the inference map
vae_mask = np.zeros((config['input_size'], config['input_size'], config['input_size']))
vae_scale = np.zeros((config['input_size'], config['input_size'], config['input_size']))
for (x, y) in train_loader:
optimizer.zero_grad()
x = x.type(Tensor).to(device)
y = y.type(Tensor).to(device)
ret_dict = model(x, y)
loss = ret_dict['loss'].mean()
loss.backward()
optimizer.step()
vae_mask += np.squeeze(ret_dict['mean_mask'].cpu().data.numpy())
vae_scale += np.squeeze(ret_dict['mask_scale'].cpu().data.numpy())
train_acc += 1
global_step += 1
vae_mask = vae_mask / train_acc
val_mask = tc.from_numpy(vae_mask).type(Tensor).to(device).view(1, 1,
config['input_size'],
config['input_size'],
config['input_size'])
vae_scale = vae_scale / train_acc
val_scale = tc.from_numpy(vae_scale).type(Tensor).to(device).view(1, 1,
config['input_size'],
config['input_size'],
config['input_size'])
val_acc = 0
accuracy_acc = 0
loss_acc = 0
likelihood_acc = 0
kld_acc = 0
recon_acc = 0
with torch.no_grad():
for (x, y) in val_loader:
x = x.type(Tensor).to(device)
y = y.type(Tensor).to(device)
ret_dict = model(x, y,
provided_mask=val_mask,
provided_scale=val_scale,
val=True)
loss_acc += ret_dict['loss'].mean().item()
val_acc += 1
likelihood_acc += ret_dict['mask_ll'].item()
accuracy_acc += ret_dict['acc'].item()
kld_acc += ret_dict['kl'].item()
recon_acc += ret_dict['recon_ll'].item()
loss = loss_acc / val_acc
lk = likelihood_acc / val_acc
acc = round(accuracy_acc / val_acc, 4)
kl = round(kld_acc / val_acc, 3)
rec = recon_acc / val_acc
print(f'Epoch: {epoch}, mask likelihood: {lk}, KL: {kl}, accuracy: {acc}, recon likelihood: {rec}')
if lk < best_lk:
best_loss = loss
best_lk = lk
best_acc = acc
best_recon = recon_acc
best_epoch = epoch
torch.save(model, f"pretrained/{timestamp}.pth")
np.save(f'vae_masks/{timestamp}.npy', vae_mask)
np.save(f'vae_masks/{timestamp}_scale.npy', vae_scale)
if epoch % 10 == 0:
print(f'Best acc: {best_acc}, likelihood: {best_lk}, epoch: {best_epoch}')
    print(f'Best acc: {best_acc}, likelihood: {best_lk}, epoch: {best_epoch}')
print('TRAINING DONE, CALIBRATING THE BEST MODEL')
model = torch.load(f"pretrained/{timestamp}.pth")
model.eval()
vae_mask = np.load(f'vae_masks/{timestamp}.npy')
best_threshold = 0
best_likelihood = 1e30
threshold_range = np.linspace(0.95, 0.99, num=20)
for thresh in threshold_range:
t = np.quantile(vae_mask, thresh)
bin_res = (vae_mask > t) * vae_mask
with torch.no_grad():
counter = 0
likelihood = 0
for (x, y) in cal_loader:
x = x.type(Tensor).to(device)
y = y.type(Tensor).to(device)
ret_dict = model(x, y,
calibrate=True,
t=float(thresh))
likelihood += ret_dict['mask_ll']
counter += 1
likelihood = likelihood / counter
if likelihood < best_likelihood:
best_likelihood = likelihood
best_threshold = thresh
t = np.quantile(vae_mask, best_threshold)
thresholded_mask = (vae_mask > t) * vae_mask
# Save the thresholded mask
np.save(f'vae_masks/thresholded_{timestamp}.npy', thresholded_mask)
if __name__ == '__main__':
# parser = argparse.ArgumentParser()
#
# parser.add_argument('-d', required=True)
# parser.add_argument('-c', required=True)
#
# args = parser.parse_args()
# Currently generate fake images and labels, replace with your own data
images = np.random.uniform(0, 1, (1000, 32, 32, 32))
labels = np.random.uniform(0, 1, (1000, 1))
# This config works pretty well, but context dependent
config = dict(input_size=32,
z_dim=128,
start_dims=16,
continuous=True,
epochs=1000,
batch_size=500
)
train_vdvae(config, images, labels)
| 6,905
| 34.234694
| 107
|
py
|
GCNH
|
GCNH-main/main.py
|
"""
Perform training and testing of GCNH on the 10 available splits
"""
import torch
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from utils import *
from datetime import datetime
from copy import deepcopy
from scipy.sparse import coo_matrix
from models import GCNH
from tqdm import tqdm
if __name__ == "__main__":
args = parse_args()
cuda = torch.cuda.is_available()
if args.use_seed:
np.random.seed(args.seed)
torch.manual_seed(args.seed)
n_nodes, n_classes = get_nodes_classes(args.dataset)
labeled = None
if args.dataset in ['cora', 'pubmed', 'citeseer']:
adj, features, labels, idx_train, idx_val, idx_test, labeled = load_data_cit(args.dataset, undirected=True)
else:
features, labels, idx_train, idx_val, idx_test = load_data(args.dataset)
adj = load_graph(args.dataset, n_nodes, features, undirected=True)
print("Train percentage: ", len(idx_train) / (len(idx_train) + len(idx_val) + len(idx_test)))
print("Eval percentage: ", len(idx_val) / (len(idx_train) + len(idx_val) + len(idx_test)))
print("Test percentage: ", len(idx_test) / (len(idx_train) + len(idx_val) + len(idx_test)))
tot_splits = 10
    if args.aggfunc not in ["mean", "sum", "maxpool"]:
        print('Valid aggregation functions are "sum", "mean", "maxpool".\nAggregation function "{}" is not available. Using "sum" instead.'.format(args.aggfunc))
        args.aggfunc = "sum"  # actually fall back to "sum", as the message promises
if args.aggfunc == "mean":
# Mean aggregation requires to normalize the adjacency matrix
print("Normalizing adj")
adj = normalize(adj, False)
if args.aggfunc == "maxpool":
# Precomputing this allows for a fast execution of maxpooling aggregation
coo_m = coo_matrix(adj.numpy())
row, col = torch.tensor(coo_m.row).long(), torch.tensor(coo_m.col).long()
else:
row, col = None, None
split_acc = []
for split in range(tot_splits):
print("Split: ", split)
idx_train, idx_val, idx_test = load_idx(split, args.dataset, labeled)
model = GCNH(nfeat=features.shape[1],
nhid=args.nhid,
nclass=n_classes,
dropout=args.dropout,
nlayers=args.nlayers,
maxpool=args.aggfunc == "maxpool")
if cuda:
print("Using CUDA")
model.cuda()
features = features.cuda()
adj = adj.cuda()
labels = labels.cuda()
idx_train = idx_train.cuda()
idx_test = idx_test.cuda()
idx_val = idx_val.cuda()
if args.aggfunc == "maxpool":
row, col = row.cuda(), col.cuda()
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
batch_size = args.batch_size
        num_batches = (len(idx_train) + batch_size - 1) // batch_size  # ceil division avoids a trailing empty batch
print("Number of batches: ", num_batches)
state_dict_early_model = None
best_val_acc = 0.0
        best_val_loss = float('inf')  # so the loss tie-break below can fire from the first epoch
t1 = datetime.now()
if args.verbose:
epochs = range(args.epochs)
else:
epochs = tqdm(range(args.epochs))
patience_count = 0
for epoch in epochs:
if patience_count > args.patience:
break
model.train()
idx = list(range(len(idx_train)))
np.random.shuffle(idx)
tot_acc = 0
tot_loss = 0
for batch in range(num_batches):
optimizer.zero_grad()
cur_idx = idx_train[idx[batch * batch_size: batch * batch_size + batch_size]]
# For each batch, forward the whole graph but compute loss only on nodes in current batch
output = model(features, adj, cur_idx=cur_idx, verbose=False,row=row,col=col)
train_loss = F.nll_loss(output, labels[cur_idx])
train_acc = accuracy(output, labels[cur_idx])
train_loss.backward()
optimizer.step()
tot_loss += train_loss.detach().cpu().numpy()
tot_acc += train_acc
# Validation for each epoch
model.eval()
with torch.no_grad():
output = model(features, adj, cur_idx=idx_val, verbose=False,row=row,col=col)
val_loss = F.nll_loss(output, labels[idx_val])
val_acc = accuracy(output, labels[idx_val])
if args.verbose:
print(
"Epoch {:05d} | Train Loss {:.4f} | Train Acc {:.4f} | Val Loss {:.4f} | Val Acc {:.4f}".format(
epoch, train_loss.item(), train_acc, val_loss, val_acc))
if val_acc >= best_val_acc and (val_acc > best_val_acc or val_loss < best_val_loss):
best_val_acc = val_acc.cpu()
best_val_loss = val_loss.detach().cpu()
state_dict_early_model = deepcopy(model.state_dict())
patience_count = 0
else:
patience_count += 1
# Perform test
with torch.no_grad():
print("Testing")
model.load_state_dict(state_dict_early_model)
model.eval()
output = model(features, adj, cur_idx=idx_test, verbose=True,row=row,col=col)
acc_test = accuracy(output, labels[idx_test])
t2 = datetime.now()
split_acc.append(acc_test.item())
print("Test_acc" + ":" + str(acc_test))
print("Time: ", (t2-t1).total_seconds())
split_acc = np.array(split_acc)
print("Average acc: ", split_acc.mean())
| 5,717
| 34.7375
| 161
|
py
|
GCNH
|
GCNH-main/utils.py
|
import scipy.sparse as sp
import torch
import numpy as np
import pickle as pkl
import sys
import networkx as nx
from dataset import CustomDataset
import argparse
import random
from os import path as path
"""
READ ARGUMENTS
"""
def parse_boolean(value):
    """Parse boolean values passed as argument (anything unrecognized is False)"""
    return value.lower() in ["true", "yes", "y", "1", "t"]
def parse_args():
""" Parse arguments """
parse = argparse.ArgumentParser()
## Run details
parse.add_argument("--model", help="model to train and test", type=str, default="GATH")
parse.add_argument("-d", "--dataset", help="dataset", type=str, default="cornell")
parse.add_argument('--model_name', type=str, help='Name of model used', default="Empty")
parse.add_argument('--verbose', type=parse_boolean, default=False, help='Whether to display training losses')
parse.add_argument('--hom_syn', type=str, default="h0.00-r1", help='Homophily level for synthetic dataset')
parse.add_argument('--use_seed', type=parse_boolean, default=True, help='Whether to use seed')
parse.add_argument('--seed', type=int, default=112, help='Seed')
parse.add_argument('--splits', type=int, default=0, help='Dataset split') ## Fix this later
parse.add_argument('--aggfunc', type=str, default="sum", help='Neighbor aggregation function: one of sum, mean or maxpool')
## Hyperparameters
parse.add_argument('--epochs', type=int, default=100, help='Number of epochs')
parse.add_argument('--patience', type=int, default=1000, help='Patience')
parse.add_argument('--batch_size', type=int, default=100000, help='Batch size')
parse.add_argument('--nhid', type=int, default=16, help='Hidden size')
parse.add_argument('--dropout', type=float, default=0.0, help='Dropout rate')
parse.add_argument('--nlayers', type=int, default=1, help='Number of layers')
parse.add_argument('--lr', type=float, default=5e-3, help='Learning rate')
parse.add_argument('--weight_decay', type=float, default=5e-3, help='Weight decay')
args = parse.parse_args()
return args
"""
GET INFORMATION FOR SELECTED MODEL
"""
def get_nodes_classes(dataset):
"""Get number of nodes and number of classes for the current graph"""
nodes = {"cornell": 183, "texas":183, "wisconsin":251, "film":7600,
"chameleon":2277, "squirrel":5201, "cora":2708, "citeseer":3327}
classes = {"cornell": 5, "texas":5, "wisconsin":5, "film":5,
"chameleon":5, "squirrel":5, "cora":7, "citeseer":6}
if dataset not in nodes:
print("Dataset is not present!")
return None, None
return nodes[dataset], classes[dataset]
def accuracy(output, labels):
"""Compute accuracy of predictions"""
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
def normalize(adj, is_sparse=False):
"""Symmetrically normalize adjacency matrix."""
if is_sparse:
adj = adj.coalesce()
indices = adj.indices()
values = adj.values()
adj = sp.coo_matrix((values, (indices[0],indices[1])), shape=adj.shape)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
mx = sparse_mx_to_torch_sparse_tensor(adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo())
assert (mx.coalesce().indices() == indices).all()
return mx
else:
d = adj.sum(dim=1) + 1e-6
adj = adj / d.view([len(d),1])
return adj
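
# Worked example for the dense branch of normalize(): a 2-node graph with
# adjacency [[0, 1], [1, 1]] has row sums [1, 2], so (up to the 1e-6
# smoothing term) normalize(adj) returns [[0, 1], [0.5, 0.5]].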
"""
LOAD GRAPHS FROM FILES
"""
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def load_idx(split, dataset, labeled):
"""Return fixed splits, doesn't work for syn"""
idx = np.load("./data/{}/splits/{}_split_0.6_0.2_{}.npz".format(dataset, dataset, split))
if labeled is None:
idx_train = np.where(idx['train_mask'] == 1)[0]
idx_test = np.where(idx['test_mask'] == 1)[0]
idx_val = np.where(idx['val_mask'] == 1)[0]
else:
idx_train = np.where(idx['train_mask'][labeled] == 1)[0]
idx_test = np.where(idx['test_mask'][labeled] == 1)[0]
idx_val = np.where(idx['val_mask'][labeled] == 1)[0]
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
return idx_train, idx_val, idx_test
def load_syn_cora(name):
"""Load the dataset in file `syn-cora/<name>.npz`
`seed` controls the generation of training, validation and test splits"""
dataset = CustomDataset(root="syn-cora", name=name, setting="gcn", seed=15)
adj = dataset.adj # Access adjacency matrix
features = dataset.features # Access node features
labels = dataset.labels
idx = np.arange(features.shape[0])
random.seed(155)
random.shuffle(idx)
idx_train = idx[:int(0.5*len(idx))]
idx_val = idx[int(0.5*len(idx)):int(0.7*len(idx))]
idx_test = idx[int(0.7*len(idx)):]
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
labels = torch.LongTensor(labels)
features = sp.csr_matrix(features, dtype=np.float32)
features = torch.FloatTensor(np.array(features.todense()))
adj = torch.FloatTensor(np.array(adj.todense()))
return features, labels, adj, idx_train, idx_val, idx_test
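# Hedged usage sketch (illustrative, not part of the original file):
#   features, labels, adj, idx_train, idx_val, idx_test = load_syn_cora("h0.50-r1")
# returns dense feature and adjacency tensors plus a fixed 50/20/30
# train/val/test split of the nodes.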
def load_data(dataset, split_name=0):
"""Load features, labels and splits for the specified dataset"""
feature_list = []
label_list = []
    # Use a context manager so the file handle is closed, and avoid shadowing
    # it with the inner loop variable below.
    with open('./data/{}/out1_node_feature_label.txt'.format(dataset), 'r') as f:
        lines = f.readlines()
    for line in lines:
        ele = line.strip().split('\t')
        if ele[0] == 'node_id':
            continue
        feature = ele[1]
        label = int(ele[2])
        if dataset == 'film':
            feature_array = np.zeros([931])
            for feat_idx in feature.strip().split(','):
                feature_array[int(feat_idx)-1] = 1
            feature_list.append(feature_array)
        else:
            feature = feature.strip().split(',')
            feature_list.append(feature)
        label_list.append(label)
feature = np.array(feature_list, dtype=float)
idx = np.load("./data/{}/splits/{}_split_0.6_0.2_{}.npz".format(dataset, dataset, split_name))
idx_train = np.where(idx['train_mask'] == 1)[0]
idx_test = np.where(idx['test_mask'] == 1)[0]
idx_val = np.where(idx['val_mask'] == 1)[0]
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
labels = torch.LongTensor(label_list)
features = sp.csr_matrix(feature, dtype=np.float32)
features = torch.FloatTensor(np.array(features.todense()))
return features, labels, idx_train, idx_val, idx_test
def load_graph(dataset, n_nodes, features=None, undirected=False):
"""Load adjacency matrix for the specified dataset"""
print('Loading {} dataset...'.format(dataset))
struct_edges = np.genfromtxt("./data/" + dataset + "/out1_graph_edges.txt", dtype=np.int32)
sedges = np.array(list(struct_edges), dtype=np.int32).reshape(struct_edges.shape)
sadj = sp.coo_matrix((np.ones(sedges.shape[0]), (sedges[:, 0], sedges[:, 1])), shape=(n_nodes, n_nodes),
dtype=np.float32)
if undirected:
sadj = sadj + sadj.T.multiply(sadj.T > sadj) - sadj.multiply(sadj.T > sadj)
nsadj = torch.FloatTensor(np.array(sadj.todense()))
return nsadj
def load_data_cit(dataset_str, split_name=0, undirected=False):
"""
Load citation graphs Cora, Citeseer and Pubmed
Loads input data from gcn/data directory
ind.dataset_str.x => the feature vectors of the training instances as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.tx => the feature vectors of the test instances as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.allx => the feature vectors of both labeled and unlabeled training instances
(a superset of ind.dataset_str.x) as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.y => the one-hot labels of the labeled training instances as numpy.ndarray object;
ind.dataset_str.ty => the one-hot labels of the test instances as numpy.ndarray object;
ind.dataset_str.ally => the labels for instances in ind.dataset_str.allx as numpy.ndarray object;
ind.dataset_str.graph => a dict in the format {index: [index_of_neighbor_nodes]} as collections.defaultdict
object;
ind.dataset_str.test.index => the indices of test instances in graph, for the inductive setting as list object.
All objects above must be saved using python pickle module.
:param dataset_str: Dataset name
    :return: All data input files loaded (as well as the training/test data).
"""
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/{}/ind.{}.{}".format(dataset_str, dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("data/{}/ind.{}.test.index".format(dataset_str, dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
if undirected:
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
adj = torch.FloatTensor(np.array(adj.todense()))
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
lab = torch.zeros([adj.shape[0], ], dtype=torch.long)
labeled = []
for i in range(adj.shape[0]):
if len(np.where(labels[i,:] == 1)[0]) != 0:
            lab[i] = int(np.where(labels[i,:] == 1)[0][0])
labeled.append(i)
adj = torch.reshape(adj[labeled, :][:,labeled], [len(labeled), len(labeled)])
features = features[labeled, :]
lab = lab[labeled]
idx = np.load("./data/{}/splits/{}_split_0.6_0.2_{}.npz".format(dataset_str, dataset_str, split_name))
idx_train = np.where(idx['train_mask'][labeled] == 1)[0]
idx_test = np.where(idx['test_mask'][labeled] == 1)[0]
idx_val = np.where(idx['val_mask'][labeled] == 1)[0]
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
labels = torch.LongTensor(labels)
features = sp.csr_matrix(features, dtype=np.float32)
features = torch.FloatTensor(np.array(features.todense()))
if dataset_str != "citeseer":
labeled = None
return adj, features, lab, idx_train, idx_val, idx_test, labeled
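# Hedged usage sketch (illustrative, not part of the original file); assumes the
# Planetoid files and the split files are present under ./data/cora.
def _load_data_cit_example():
    adj, features, lab, idx_train, idx_val, idx_test, labeled = load_data_cit("cora")
    print(adj.shape)       # torch.Size([2708, 2708]) for Cora
    print(features.shape)  # torch.Size([2708, 1433])
    print(labeled)         # None for every dataset except citeseer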
| 12,193
| 38.980328
| 127
|
py
|
GCNH
|
GCNH-main/dataset.py
|
"""
Dataset class definition for syn-cora
ref: https://github.com/GemsLab/H2GCN
"""
import os.path as osp
import numpy as np
import scipy.sparse as sp
class Dataset():
"""Dataset class contains four citation network datasets "cora", "cora-ml", "citeseer" and "pubmed",
    and one blog dataset "Polblogs". Datasets "ACM", "BlogCatalog", "Flickr", and "UAI"
    are also available. See more details in https://github.com/DSE-MSU/DeepRobust/tree/master/deeprobust/graph#supported-datasets.
The 'cora', 'cora-ml', 'polblogs' and 'citeseer' are downloaded from https://github.com/danielzuegner/gnn-meta-attack/tree/master/data, and 'pubmed' is from https://github.com/tkipf/gcn/tree/master/gcn/data.
Parameters
----------
root : string
root directory where the dataset should be saved.
name : string
dataset name, it can be chosen from ['cora', 'citeseer', 'cora_ml', 'polblogs',
'pubmed', 'acm', 'blogcatalog', 'uai', 'flickr']
setting : string
there are two data splits settings. It can be chosen from ['nettack', 'gcn', 'prognn']
        The 'nettack' setting follows the nettack paper, where they select the largest
        connected component of the graph and use 10%/10%/80% of the nodes for
        training/validation/test.
        The 'gcn' setting follows the gcn paper, where they use the full graph and 20
        samples per class for training, 500 nodes for validation, and 1000 nodes for
        test. (Note that the 'nettack' and 'gcn' settings do not provide fixed splits,
        i.e., different random seeds would return different data splits.)
seed : int
random seed for splitting training/validation/test.
require_mask : bool
setting require_mask True to get training, validation and test mask
(self.train_mask, self.val_mask, self.test_mask)
Examples
--------
We can first create an instance of the Dataset class and then take out its attributes.
>>> from deeprobust.graph.data import Dataset
>>> data = Dataset(root='/tmp/', name='cora', seed=15)
>>> adj, features, labels = data.adj, data.features, data.labels
>>> idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
"""
def __init__(self, root, name, setting='nettack', seed=None, require_mask=False):
self.name = name.lower()
self.setting = setting.lower()
assert self.name in ['cora', 'citeseer', 'cora_ml', 'polblogs',
'pubmed', 'acm', 'blogcatalog', 'uai', 'flickr'], \
'Currently only support cora, citeseer, cora_ml, ' + \
'polblogs, pubmed, acm, blogcatalog, flickr'
        assert self.setting in ['gcn', 'nettack', 'prognn'], "Settings should be" + \
                        " chosen from ['gcn', 'nettack', 'prognn']"
self.seed = seed
# self.url = 'https://raw.githubusercontent.com/danielzuegner/nettack/master/data/%s.npz' % self.name
self.url = 'https://raw.githubusercontent.com/danielzuegner/gnn-meta-attack/master/data/%s.npz' % self.name
self.root = osp.expanduser(osp.normpath(root))
self.data_folder = osp.join(root, self.name)
self.data_filename = self.data_folder + '.npz'
self.require_mask = require_mask
        self.require_lcc = setting != 'gcn'
self.adj, self.features, self.labels = self.load_data()
def load_data(self):
# print('Loading {} dataset...'.format(self.name))
if self.name == 'pubmed':
return self.load_pubmed()
if self.name in ['acm', 'blogcatalog', 'uai', 'flickr']:
return self.load_zip()
if not osp.exists(self.data_filename):
self.download_npz()
adj, features, labels = self.get_adj()
return adj, features, labels
def get_adj(self):
adj, features, labels = self.load_npz(self.data_filename)
adj = adj + adj.T
adj = adj.tolil()
adj[adj > 1] = 1
if self.require_lcc:
lcc = self.largest_connected_components(adj)
adj = adj[lcc][:, lcc]
features = features[lcc]
labels = labels[lcc]
assert adj.sum(0).A1.min() > 0, "Graph contains singleton nodes"
# whether to set diag=0?
adj.setdiag(0)
adj = adj.astype("float32").tocsr()
adj.eliminate_zeros()
assert np.abs(adj - adj.T).sum() == 0, "Input graph is not symmetric"
assert adj.max() == 1 and len(np.unique(adj[adj.nonzero()].A1)) == 1, "Graph must be unweighted"
return adj, features, labels
def load_npz(self, file_name, is_sparse=True):
with np.load(file_name) as loader:
# loader = dict(loader)
if is_sparse:
adj = sp.csr_matrix((loader['adj_data'], loader['adj_indices'],
loader['adj_indptr']), shape=loader['adj_shape'])
if 'attr_data' in loader:
features = sp.csr_matrix((loader['attr_data'], loader['attr_indices'],
loader['attr_indptr']), shape=loader['attr_shape'])
else:
features = None
labels = loader.get('labels')
else:
adj = loader['adj_data']
if 'attr_data' in loader:
features = loader['attr_data']
else:
features = None
labels = loader.get('labels')
if features is None:
features = np.eye(adj.shape[0])
features = sp.csr_matrix(features, dtype=np.float32)
return adj, features, labels
def largest_connected_components(self, adj, n_components=1):
"""Select k largest connected components.
Parameters
----------
adj : scipy.sparse.csr_matrix
input adjacency matrix
n_components : int
n largest connected components we want to select
"""
_, component_indices = sp.csgraph.connected_components(adj)
component_sizes = np.bincount(component_indices)
components_to_keep = np.argsort(component_sizes)[::-1][:n_components] # reverse order to sort descending
nodes_to_keep = [
idx for (idx, component) in enumerate(component_indices) if component in components_to_keep]
print("Selecting {0} largest connected components".format(n_components))
return nodes_to_keep
def __repr__(self):
return '{0}(adj_shape={1}, feature_shape={2})'.format(self.name, self.adj.shape, self.features.shape)
def get_mask(self):
idx_train, idx_val, idx_test = self.idx_train, self.idx_val, self.idx_test
labels = self.onehot(self.labels)
def get_mask(idx):
            mask = np.zeros(labels.shape[0], dtype=bool)
mask[idx] = 1
return mask
def get_y(idx):
mx = np.zeros(labels.shape)
mx[idx] = labels[idx]
return mx
self.train_mask = get_mask(self.idx_train)
self.val_mask = get_mask(self.idx_val)
self.test_mask = get_mask(self.idx_test)
self.y_train, self.y_val, self.y_test = get_y(idx_train), get_y(idx_val), get_y(idx_test)
def onehot(self, labels):
eye = np.identity(labels.max() + 1)
onehot_mx = eye[labels]
return onehot_mx
def parse_index_file(filename):
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
class CustomDataset(Dataset):
def __init__(self, root, name, setting='gcn', seed=None, require_mask=False):
'''
Adopted from https://github.com/DSE-MSU/DeepRobust/blob/master/deeprobust/graph/data/dataset.py
'''
self.name = name.lower()
self.setting = setting.lower()
self.seed = seed
self.url = None
self.root = osp.expanduser(osp.normpath(root))
self.data_folder = osp.join(root, self.name)
self.data_filename = self.data_folder + '.npz'
# Make sure dataset file exists
assert osp.exists(self.data_filename), f"{self.data_filename} does not exist!"
self.require_mask = require_mask
        self.require_lcc = setting == 'nettack'
self.adj, self.features, self.labels = self.load_data()
if self.require_mask:
self.get_mask()
def get_adj(self):
adj, features, labels = self.load_npz(self.data_filename)
adj = adj + adj.T
adj = adj.tolil()
adj[adj > 1] = 1
if self.require_lcc:
lcc = self.largest_connected_components(adj)
# adj = adj[lcc][:, lcc]
adj_row = adj[lcc]
adj_csc = adj_row.tocsc()
adj_col = adj_csc[:, lcc]
adj = adj_col.tolil()
features = features[lcc]
labels = labels[lcc]
assert adj.sum(0).A1.min() > 0, "Graph contains singleton nodes"
# whether to set diag=0?
adj.setdiag(0)
adj = adj.astype("float32").tocsr()
adj.eliminate_zeros()
assert np.abs(adj - adj.T).sum() == 0, "Input graph is not symmetric"
assert adj.max() == 1 and len(np.unique(adj[adj.nonzero()].A1)) == 1, "Graph must be unweighted"
return adj, features, labels
| 9,331
| 39.398268
| 211
|
py
|
GCNH
|
GCNH-main/layers.py
|
"""
GCNH Layer
"""
import torch
import torch.nn as nn
from torch.nn.modules.module import Module
from torch_scatter import scatter
class GCNH_layer(Module):
def __init__(self, nfeat, nhid, maxpool):
super(GCNH_layer, self).__init__()
self.nhid = nhid
self.maxpool = maxpool
# Two MLPs, one to encode center-node embedding,
# the other for the neighborhood embedding
self.MLPfeat = nn.Sequential(
nn.Linear(nfeat, self.nhid),
nn.LeakyReLU()
)
self.init_weights(self.MLPfeat)
self.MLPmsg = nn.Sequential(
nn.Linear(nfeat, self.nhid),
nn.LeakyReLU()
)
self.init_weights(self.MLPmsg)
# Parameter beta
self.beta = nn.Parameter(0.0 * torch.ones(size=(1, 1)), requires_grad=True)
def init_weights(self, m):
if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
def forward(self, feat, adj, cur_idx=None,row=None,col=None):
"""
feat: feature matrix
adj: adjacency matrix
cur_idx: index of nodes in current batch
row, col: used for maxpool aggregation
"""
        if cur_idx is None:
cur_idx = range(feat.shape[0])
# Transform center-node and neighborhood messages
h = self.MLPfeat(feat)
z = self.MLPmsg(feat)
# Aggregate messages
beta = torch.sigmoid(self.beta)
if not self.maxpool: # sum or mean
hp = beta * z + (1-beta) * torch.matmul(adj, h)
else:
hh = torch.zeros(adj.shape[0], self.nhid)
if next(self.parameters()).is_cuda:
hh = hh.cuda()
_ = scatter(h[row], col, dim=0, out=hh, reduce="max")
hp = beta * z + (1 - beta) * hh
return hp, beta
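# Minimal usage sketch (illustrative, not part of the original file): one GCNH
# layer on a toy 3-node path graph with sum aggregation. The layer mixes the
# center-node embedding z with the aggregated neighborhood embedding A.h through
# a learned gate: hp = sigmoid(beta) * z + (1 - sigmoid(beta)) * A.h.
def _gcnh_layer_example():
    layer = GCNH_layer(nfeat=4, nhid=8, maxpool=False)
    feat = torch.randn(3, 4)
    adj = torch.tensor([[0., 1., 0.], [1., 0., 1.], [0., 1., 0.]])
    hp, beta = layer(feat, adj)
    print(hp.shape, beta.item())  # torch.Size([3, 8]) 0.5, since beta starts at 0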
| 1,967
| 28.373134
| 84
|
py
|
GCNH
|
GCNH-main/models.py
|
"""
Define GCNH model
"""
import torch.nn as nn
import torch.nn.functional as F
from layers import GCNH_layer
import torch
from utils import *
class GCNH(nn.Module):
def __init__(self, nfeat, nclass, nhid, dropout, nlayers, maxpool):
super(GCNH, self).__init__()
self.nhid = nhid
self.dropout = dropout
self.nlayers = nlayers
# Define layers
layer_sizes = [nfeat] + [nhid] * (self.nlayers - 1)
self.layers = nn.ModuleList([GCNH_layer(layer_sizes[i], nhid, maxpool) for i in range(self.nlayers)])
# MLP for classification
self.MLPcls = nn.Sequential(
nn.Linear(self.nhid, nclass),
nn.LogSoftmax(dim=1)
)
self.init_weights(self.MLPcls)
def init_weights(self, m):
if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
def forward(self, feat, adj, cur_idx=None, verbose=False, row=None, col=None):
"""
feat: feature matrix
adj: adjacency matrix
cur_idx: index of nodes in current batch
row, col: used for maxpool aggregation
"""
        if cur_idx is None:
cur_idx = range(feat.shape[0])
hp = feat
for i in range(self.nlayers):
hp, beta = self.layers[i](hp, adj, cur_idx=cur_idx,row=row,col=col)
if verbose:
print("Layer: ", i, " beta: ", beta.item())
hp = F.dropout(hp, self.dropout, training=self.training)
return self.MLPcls(hp[cur_idx])
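# Minimal usage sketch (illustrative, not part of the original file): a 2-layer
# GCNH on a toy graph. The classifier ends in LogSoftmax, so every output row
# exponentiates to a probability distribution over the classes.
def _gcnh_model_example():
    model = GCNH(nfeat=4, nclass=5, nhid=8, dropout=0.0, nlayers=2, maxpool=False)
    out = model(torch.randn(3, 4), torch.eye(3))
    print(out.shape)             # torch.Size([3, 5])
    print(out.exp().sum(dim=1))  # ~[1., 1., 1.]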
| 1,619
| 28.454545
| 109
|
py
|
GCNH
|
GCNH-main/main_syn.py
|
"""
Perform training and testing of GCNH on the synthetic dataset
"""
import torch
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from utils import *
import os
from tqdm import tqdm
from copy import deepcopy
from models import GCNH
from scipy.sparse import coo_matrix
if __name__ == "__main__":
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
args = parse_args()
dataset = "syn"
n_classes = 5
cuda = torch.cuda.is_available()
if args.use_seed:
np.random.seed(args.seed)
torch.manual_seed(args.seed)
for hom_syn in ["h0.00-r", "h0.10-r", "h0.20-r", "h0.30-r", "h0.40-r", "h0.50-r", "h0.60-r", "h0.70-r", "h0.80-r", "h0.90-r", "h1.00-r"]:
final_acc = []
b_list = []
for r in range(1,4): # There are 3 datasets for each homophily level
print("Loading graph ", hom_syn + str(r))
features, labels, adj, idx_train, idx_val, idx_test = load_syn_cora(hom_syn + str(r))
if args.aggfunc == "mean":
adj = normalize(adj)
if args.aggfunc == "maxpool":
# Precomputing this allows for a fast execution of maxpooling aggregation
coo_m = coo_matrix(adj.numpy())
row, col = torch.tensor(coo_m.row).long(), torch.tensor(coo_m.col).long()
else:
row, col = None, None
model = GCNH(nfeat=features.shape[1],
nhid=args.nhid,
nclass=n_classes,
dropout=args.dropout,
nlayers=args.nlayers,
maxpool=args.aggfunc == "maxpool")
if cuda:
model.cuda()
features = features.cuda()
adj = adj.cuda()
labels = labels.cuda()
idx_train = idx_train.cuda()
idx_test = idx_test.cuda()
idx_val = idx_val.cuda()
if args.aggfunc == "maxpool":
row, col = row.cuda(), col.cuda()
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
n_nodes = adj.shape[0]
batch_size = args.batch_size
            num_batches = (len(idx_train) + batch_size - 1) // batch_size  # ceil division avoids an empty trailing batch
state_dict_early_model = None
best_val_acc = 0.0
            best_val_loss = float("inf")  # lower is better, so start from +inf
for epoch in tqdm(range(args.epochs)):
model.train()
idx = list(range(len(idx_train)))
np.random.shuffle(idx)
for batch in range(num_batches):
optimizer.zero_grad()
cur_idx = idx_train[idx[batch * batch_size: batch * batch_size + batch_size]]
# For each batch, forward the whole graph but compute loss only on nodes in current batch
output = model(features, adj, cur_idx=cur_idx, verbose=False,row=row,col=col)
train_loss = F.nll_loss(output, labels[cur_idx])
train_acc = accuracy(output, labels[cur_idx])
train_loss.backward()
optimizer.step()
# Validation for each epoch
model.eval()
with torch.no_grad():
output = model(features, adj, cur_idx=idx_val, verbose=False,row=row,col=col)
val_loss = F.nll_loss(output, labels[idx_val])
val_acc = accuracy(output, labels[idx_val])
if args.verbose:
print(
"Epoch {:05d} | Train Loss {:.4f} | Train Acc {:.4f} | Val Loss {:.4f} | Val Acc {:.4f}".format(
epoch, train_loss.item(), train_acc, val_loss, val_acc))
if val_acc >= best_val_acc and (val_acc > best_val_acc or val_loss < best_val_loss):
best_val_acc = val_acc.cpu()
best_val_loss = val_loss.detach().cpu()
state_dict_early_model = deepcopy(model.state_dict())
# Perform test
with torch.no_grad():
model.load_state_dict(state_dict_early_model)
model.eval()
output = model(features, adj, cur_idx=idx_test, verbose=True,row=row,col=col)
acc_test = accuracy(output, labels[idx_test])
final_acc.append(acc_test.detach().cpu().item())
print("Test_acc" + ":" + str(acc_test.detach().cpu().item()))
final_acc = np.array(final_acc)
print("Total accuracy: ", np.mean(final_acc) , " std: ", np.std(final_acc))
| 4,675
| 36.111111
| 141
|
py
|
path-integrity
|
path-integrity-main/pi-oracle.py
|
#!/usr/bin/python
import re
import os
import sys
debug = True
lines = sys.stdin.readlines()
lemma = sys.argv[1]
# INPUT:
# - lines contains a list of "%i:goal" entries, where "%i" is the index of the goal
# - lemma contains the name of the lemma under scrutiny
# OUTPUT:
# - (on stdout) a list of goal indices, one per line, ordered by decreasing priority
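# Example (illustrative, not from the original file): for lemma "path_integrity"
# and the stdin goals
#   0:Ltk(...)
#   1:Add(...)
#   2:KU(...)
# the oracle prints 1, 0, 2 (Add outranks Ltk, which outranks KU); goals that
# match no pattern are dropped from the output.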
def debugPrint(text):
if (debug):
print(text)
rank = [] # list of list of goals, main list is ordered by priority
# Higher priority is better
# e.g. things with priority 10 will happen first,
# things with priority 0 go close to last
# things with no priority go very last
maxPrio = 11
for i in range(0,maxPrio):
rank.append([])
# ---------------------- #
# ---- Reachability ---- #
# ---------------------- #
if 'reachable' in lemma:
debugPrint("Applying oracle to lemma: "+lemma)
for line in lines:
num = line.split(':')[0]
if 'Add' in line: rank[10].append(num)
elif 'TOKEN' in line: rank[9].append(num) # Tokens and counters take top priority
elif 'COUNTER' in line: rank[9].append(num)
elif 'Ltk' in line: rank[8].append(num)
elif 'Pk' in line: rank[8].append(num)
elif 'Build' in line: rank[7].append(num)
# ---------------------- #
# --- Path Integrity --- #
# ---------------------- #
elif 'integrity' in lemma:
debugPrint("Applying oracle to lemma: "+lemma)
for line in lines:
num = line.split(':')[0]
# Highest priority goes to event facts actually used in the lemma
if 'Add' in line: rank[10].append(num)
elif 'Forward' in line: rank[10].append(num)
elif 'StartBuild' in line: rank[10].append(num)
elif 'Complete' in line: rank[10].append(num)
# Next priority goes to enforcing any bounds
elif 'TOKEN' in line: rank[9].append(num)
elif 'COUNTER' in line: rank[9].append(num)
# Next deduce where keys came from (which may then trigger aforementioned bounds)
elif 'Ltk' in line: rank[8].append(num)
elif 'Pk' in line: rank[8].append(num)
elif 'ShKey' in line: rank[8].append(num)
elif 'SessKey' in line: rank[8].append(num)
elif 'SegmentKey' in line: rank[8].append(num)
# Next deduce adversary knowledge (e.g. a send rule must have happened)
elif 'KU' in line: rank[7].append(num)
# Next run through construction phase
elif 'Build' in line: rank[6].append(num)
# ---------------------- #
# --- Path Symmetry --- #
# ---------------------- #
elif 'symmetry' in lemma:
debugPrint("Applying oracle to lemma: "+lemma)
for line in lines:
num = line.split(':')[0]
# Highest priority goes to event facts actually used in the lemma
if 'Add' in line: rank[10].append(num)
elif 'Forward' in line: rank[10].append(num)
elif 'StartBuild' in line: rank[10].append(num)
elif 'Complete' in line: rank[10].append(num)
# Next priority goes to enforcing any bounds
elif 'TOKEN' in line: rank[9].append(num)
elif 'COUNTER' in line: rank[9].append(num)
# Next deduce where keys came from (which may then trigger aforementioned bounds)
elif 'Ltk' in line: rank[8].append(num)
elif 'Pk' in line: rank[8].append(num)
elif 'ShKey' in line: rank[8].append(num)
elif 'SessKey' in line: rank[8].append(num)
elif 'SegmentKey' in line: rank[8].append(num)
# Next deduce adversary knowledge (e.g. a send rule must have happened)
elif 'KU' in line: rank[7].append(num)
# Next run through construction phase
elif 'Build' in line: rank[6].append(num)
else:
debugPrint("No oracle found for this lemma")
exit(0)
# Ordering all goals by ranking (higher first)
for listGoals in reversed(rank):
for goal in listGoals:
print(goal)
| 3,710
| 34.682692
| 87
|
py
|
merc2020
|
merc2020-master/feature_extract.py
|
"""
feature extraction code
"""
import os
import tensorflow as tf
import keras
from keras.layers import Dense, Lambda, AveragePooling1D
import numpy as np
from keras import backend as K
from keras.models import load_model, Model
os.environ["CUDA_VISIBLE_DEVICES"]='0'
def attention_pooling(model_input):
"""
attention pooling module
Args:
model_input: sequential input
Returns:
        attention_output: attention-weighted summary of the input sequence
"""
# average pooling for lstm units
model_input_mean = AveragePooling1D(pool_size=128, data_format='channels_first', padding='valid')(model_input)
model_input_mean = Lambda(lambda x: K.squeeze(x, axis=2))(model_input_mean)
# transposed input
model_input_tran = Lambda(lambda x: K.permute_dimensions(x, [0, 2, 1]))(model_input)
# calculate attention weight
attention = Dense(50, activation='softmax', name='attention')(model_input_mean)
# input * attention weight
attention_output = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=(1, 2)))([attention, model_input_tran])
return attention_output
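# Shape walk-through for the pooling above (illustrative, not part of the
# original file): a BiLSTM output of shape (batch, 50, 128) is average-pooled
# channels-first down to (batch, 50); Dense(50, softmax) turns that into
# per-timestep weights; and the batch_dot with the transposed input of shape
# (batch, 128, 50) yields the weighted summary of shape (batch, 128).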
def inter_model_load(model_path):
speech_model = load_model(model_path, custom_objects={'attention_pooling': attention_pooling}) # Load best model
inter_layer_model = Model(inputs=speech_model.input, outputs=speech_model.get_layer('dense_2').output)
return inter_layer_model
def main(data_type):
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
config = tf.ConfigProto(device_count={'GPU': 1, 'CPU': 30}, gpu_options=gpu_options)
sess = tf.Session(config=config)
keras.backend.set_session(sess)
# Load mel-spectrogram numpy
x = np.load('dataset/' + 'speech_' + data_type + '.npy')
# Load model
modelPath = 'model/speech_model_acc_0.3925.hdf5'
model = inter_model_load(modelPath)
model.summary()
# Feature extraction
feature = model.predict(x, verbose=1, batch_size=256)
print(np.shape(feature))
if not (os.path.isdir('features')):
os.makedirs(os.path.join('features'))
np.save('features/' + 'speech_BN_' + data_type + '.npy', feature)
print("Finished")
K.clear_session()
if __name__ == '__main__':
print("train/val/test1/test2/test3:")
data_type = input()
main(data_type)
| 2,279
| 28.230769
| 117
|
py
|
merc2020
|
merc2020-master/train.py
|
"""
train code
"""
import os
import tensorflow as tf
import keras
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Conv2D, Dropout, MaxPooling2D, Input, Flatten, Lambda, AveragePooling1D, Activation, TimeDistributed, LSTM, Bidirectional, BatchNormalization
import numpy as np
from keras import backend as K
from keras.models import Model
os.environ["CUDA_VISIBLE_DEVICES"]='0'
def attention_pooling(model_input):
"""
attention pooling module
Args:
model_input: sequential input
Returns:
        attention_output: attention-weighted summary of the input sequence
"""
# average pooling for lstm units
model_input_mean = AveragePooling1D(pool_size=128, data_format='channels_first', padding='valid')(model_input)
model_input_mean = Lambda(lambda x: K.squeeze(x, axis=2))(model_input_mean)
# transposed input
model_input_tran = Lambda(lambda x: K.permute_dimensions(x, [0, 2, 1]))(model_input)
# calculate attention weight
attention = Dense(50, activation='softmax', name='attention')(model_input_mean)
# input * attention weight
attention_output = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=(1, 2)))([attention, model_input_tran])
return attention_output
def speech_base_model():
"""
speech baseline model
Returns:
model: speech baseline model
"""
model_in = Input(shape=(400, 40, 1))
# Layer1 : conv , batch norm, relu, and maxpool
model_conv2d_1 = Conv2D(8, (5, 5), padding='same')(model_in)
model_bn_1 = BatchNormalization()(model_conv2d_1)
model_relu_1 = Activation(activation='relu')(model_bn_1)
model_mpool_1 = MaxPooling2D((2, 2), strides=(2, 2), padding='valid')(model_relu_1)
# Layer2 : conv , batch norm, relu, and maxpool
model_conv2d_2 = Conv2D(16, (5, 5), padding='same')(model_mpool_1)
model_bn_2 = BatchNormalization()(model_conv2d_2)
model_relu_2 = Activation(activation='relu')(model_bn_2)
model_mpool_2 = MaxPooling2D((2, 2), strides=(2, 2), padding='valid')(model_relu_2)
# Layer3 : conv , batch norm, relu, and maxpool
model_conv2d_3 = Conv2D(32, (5, 5), padding='same')(model_mpool_2)
model_bn_3 = BatchNormalization()(model_conv2d_3)
model_relu_3 = Activation(activation='relu')(model_bn_3)
model_mpool_3 = MaxPooling2D((2, 2), strides=(2, 2), padding='valid')(model_relu_3)
# Flatten layer
model_flat = TimeDistributed(Flatten())(model_mpool_3)
# bi-lstm and attention pooling
model_bi_lstm = Bidirectional(LSTM(64, return_sequences=True))(model_flat)
model_att = attention_pooling(model_bi_lstm)
# dense layer
model_dense_1 = Dense(64, activation='relu')(model_att)
model_dense_1 = Dropout(0.5)(model_dense_1)
model_out = Dense(7, activation='softmax', name='output_layer')(model_dense_1)
model = Model(inputs=model_in, outputs=model_out)
model.summary()
return model
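# Shape walk-through (illustrative, not part of the original file):
# (None, 400, 40, 1) -> three conv/pool stages -> (None, 50, 5, 32)
# -> TimeDistributed(Flatten) -> (None, 50, 160) -> BiLSTM -> (None, 50, 128)
# -> attention pooling -> (None, 128) -> Dense(64) -> Dense(7, softmax).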
def main():
# Load Training & Validation data
_ROOT_PATH = "dataset/"
x_train = np.load(_ROOT_PATH + "speech_train.npy")
x_val = np.load(_ROOT_PATH + "speech_val.npy")
y_train = np.load(_ROOT_PATH + "label_train.npy")
y_val = np.load(_ROOT_PATH + "label_val.npy")
# Convert labels to categorical one-hot encoding
y_train = keras.utils.to_categorical(y_train, num_classes=7)
y_val = keras.utils.to_categorical(y_val, num_classes=7)
# Training Parameter setting
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
config = tf.ConfigProto(device_count={'GPU': 1, 'CPU': 30}, gpu_options=gpu_options)
sess = tf.Session(config=config)
keras.backend.set_session(sess)
# Model build
model = speech_base_model()
# Model Check point
model_path = 'model/' + 'speech_model_' + 'acc_{val_acc:.4f}.hdf5'
checkpoint = ModelCheckpoint(filepath=model_path, monitor='val_loss', verbose=1, save_best_only=True)
early_stopping = EarlyStopping(monitor='val_acc', min_delta=0.0005, patience=30, verbose=1, mode='auto')
# Training
adam = keras.optimizers.Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, amsgrad=False)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=256, epochs=256, validation_data=(x_val, y_val), verbose=1, callbacks=[early_stopping, checkpoint])
### Evaluation
score = model.evaluate(x_val, y_val, batch_size=256)
print(score)
print('Test score:', score[0])
print('Test accuracy:', score[1])
if __name__ == '__main__':
main()
| 4,570
| 34.434109
| 173
|
py
|
merc2020
|
merc2020-master/gen_dataset.py
|
"""
generate dataset_numpy
"""
import os
import numpy as np
import librosa
import scipy
from pyvad import trim
### Pre-processing
MAX_FRAME_LENGTH = 400 # max number of frames (4 sec at a 10 ms stride)
STRIDE = 0.01 # STRIDE (10ms)
WINDOW_SIZE = 0.025 # filter window size (25ms)
NUM_MELS = 40 # Mel filter number
PRE_EMPHASIS_COEFF = 0.97 # Pre-Emphasis filter coefficient
EMOTION_LIST = ['hap', 'ang', 'dis', 'fea', 'neu', 'sad', 'sur']
def preprocessing(wav, sampling_rate):
"""
Args:
wav: wave
sr: sampling rate
Returns:
input_mels
"""
# Resampling to 16kHz
if sampling_rate != 16000:
sampling_rate_re = 16000 # sampling rate of resampling
wav = librosa.resample(wav, sampling_rate, sampling_rate_re)
sampling_rate = sampling_rate_re
# Denoising
wav[np.argwhere(wav == 0)] = 1e-10
wav_denoise = scipy.signal.wiener(wav, mysize=None, noise=None)
# Pre Emphasis filter
wav_emphasis = np.append(wav_denoise[0], wav_denoise[1:] - PRE_EMPHASIS_COEFF * wav_denoise[:-1])
# Normalization (Peak)
wav_max = np.abs(wav_emphasis).max() / 0.9
wav_emphasis = wav_emphasis / wav_max # normalize for VAD
# Voice Activity Detection (VAD)
vad_mode = 2 # VAD mode = 0 ~ 3
wav_vad = trim(wav_emphasis, sampling_rate, vad_mode=vad_mode, thr=0.01) ## trim
if wav_vad is None:
wav_vad = wav_emphasis
# De normalization
wav_vad = wav_vad * wav_max
# Obtain the spectrogram
    stft_vad = librosa.core.stft(y=wav_vad, hop_length=int(sampling_rate * STRIDE), n_fft=int(sampling_rate * WINDOW_SIZE))
    spec = np.abs(stft_vad) ** 2
# mel spectrogram
mel_spec = librosa.feature.melspectrogram(S=spec, n_mels=NUM_MELS)
# log scaled mel spectrogram
log_weight = 1e+6
log_mel_spec = np.log(1 + log_weight * mel_spec)
frame_length = log_mel_spec.shape[1]
# zero padding
input_mels = np.zeros((NUM_MELS, MAX_FRAME_LENGTH), dtype=float)
if frame_length < MAX_FRAME_LENGTH:
input_mels[:, :frame_length] = log_mel_spec[:, :frame_length]
else:
input_mels[:, :MAX_FRAME_LENGTH] = log_mel_spec[:, :MAX_FRAME_LENGTH]
return input_mels
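# Hedged usage sketch (illustrative, not part of the original file): the
# pipeline always returns a fixed-size array, zero-padding short clips and
# truncating long ones.
def _preprocessing_example():
    wav = np.random.randn(16000).astype(np.float32)  # 1 s of synthetic audio @ 16 kHz
    print(preprocessing(wav, 16000).shape)  # (40, 400) == (NUM_MELS, MAX_FRAME_LENGTH)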
# Main Code
def main(data_type):
"""
main code
Args:
data_type: data type (train/val/test1/test2/test3)
"""
# data folder path
load_path = '/data2/DATA_SET/qia2020/' + data_type
# load_path = './' + data_type
# file list
dir_files = os.listdir(load_path)
file_list = []
for files in dir_files:
if '.mp4' in files:
file_list.append(files)
file_list.sort(key=lambda f: int(f.split("-")[0]))
# Data
x_npy = np.zeros((len(file_list), MAX_FRAME_LENGTH, NUM_MELS, 1))
for num_file, file_name in enumerate(file_list):
file_path = load_path + '/' + file_name
print('File path = ' + file_path)
# load wav
wav, sampling_rate = librosa.load(file_path)
# Preprocessing(Resampling, Normalization, Denoising, Pre-emphasis, VAD)
input_mels = preprocessing(wav, sampling_rate)
# save
x_npy[num_file, :, :, 0] = input_mels.T
# Label
if (data_type == 'train') or (data_type == 'val'):
y_npy = np.zeros((len(file_list),))
for num_file, file_name in enumerate(file_list):
# Obtain Emotion label
emotion = file_name.split("-")[6]
emotion_num = EMOTION_LIST.index(emotion) # Convert to emotion number
# save
y_npy[num_file] = emotion_num
# Save numpy
if not(os.path.isdir('dataset')):
os.makedirs(os.path.join('dataset'))
np.save('dataset/' + 'speech_' + data_type + '.npy', x_npy)
if (data_type == 'train') or (data_type == 'val'):
np.save('dataset/' + 'label_' + data_type + '.npy', y_npy)
print("Finished")
if __name__ == '__main__':
print("train/val/test1/test2/test3:")
data_type = input()
main(data_type)
| 4,003
| 27.805755
| 123
|
py
|