code stringlengths 17 6.64M |
|---|
def test_sorted_slice_sampler():
    """SortedSliceSampler must yield descending-length batches, halve over-long
    batches, report a length equal to the dataset size, and produce at most one
    occurrence of each irregular batch size per epoch."""
    batch_size = 16
    max_length = 16000 * 5
    lengths = [random.randint(16000 * 3, 16000 * 8) for _ in range(1000)]
    sampler = SortedSliceSampler(lengths, batch_size=batch_size, max_length=max_length)
    for epoch in range(5):
        sampler.set_epoch(epoch)
        for batch_ids in sampler:
            batch_lengths = [lengths[idx] for idx in batch_ids]
            # Every batch is sorted longest-first.
            assert batch_lengths == sorted(batch_lengths, reverse=True)
            # Batches whose longest item exceeds max_length are halved.
            if batch_lengths[0] > max_length:
                assert len(batch_lengths) == batch_size // 2
        # Any batch size other than full or half must appear at most once.
        other_batch_sizes = [
            len(batch)
            for batch in sampler
            if len(batch) not in (batch_size, batch_size // 2)
        ]
        assert len(other_batch_sizes) == len(set(other_batch_sizes))
    assert len(sampler) == len(lengths)
|
def test_sorted_bucketing_sampler():
    """SortedBucketingSampler must yield descending-length batches, halve
    over-long batches, allow at most one irregular (remainder) batch size,
    and report a batch count consistent with full/half bucketing."""
    batch_size = 16
    max_length = 16000 * 5
    lengths = [random.randint(16000 * 3, 16000 * 8) for _ in range(1000)]
    sampler = SortedBucketingSampler(lengths, batch_size=batch_size, max_length=max_length, shuffle=False)
    for epoch in range(5):
        sampler.set_epoch(epoch)
        for batch_ids in sampler:
            batch_lengths = [lengths[idx] for idx in batch_ids]
            # Every batch is sorted longest-first.
            assert batch_lengths == sorted(batch_lengths, reverse=True)
            # Batches whose longest item exceeds max_length are halved.
            if batch_lengths[0] > max_length:
                assert len(batch_lengths) == batch_size // 2
        batch_sizes = [len(batch_indices) for batch_indices in sampler]
        # BUG FIX: the comprehension variable used to be named `batch_size`,
        # shadowing the outer constant so the membership test read
        # `batch_size not in [batch_size, ...]` — always False, making
        # `other_batch_sizes` unconditionally empty and the assert vacuous.
        other_batch_sizes = [size for size in batch_sizes if size not in (batch_size, batch_size // 2)]
        # Only the final remainder bucket may have an irregular size.
        assert len(other_batch_sizes) <= 1
    # len(sampler) counts batches: between all-full (n/16) and all-half (n/8).
    assert len(lengths) / 16 < len(sampler) < len(lengths) / 8
|
def test_sox_effect():
    """Applying the same sox effect chain to a file and to an in-memory tensor
    must produce the same waveform and sample rate."""
    effects = [['channels', '1'], ['rate', '16000'], ['gain', '-3.0']]
    # BUG FIX: writing f'{file.name}.wav' next to a NamedTemporaryFile left the
    # .wav on disk after the test; a TemporaryDirectory cleans everything up.
    with tempfile.TemporaryDirectory() as directory:
        tensor = torch.randn(1, 16000 * 10)
        filename = os.path.join(directory, 'audio.wav')
        torchaudio.save(filename, tensor, SAMPLE_RATE)
        wav1, sr1 = torchaudio.sox_effects.apply_effects_file(filename, effects=effects)
        wav2, sr2 = torchaudio.sox_effects.apply_effects_tensor(tensor, SAMPLE_RATE, effects)
        # BUG FIX: the allclose result was previously computed but never
        # asserted, so a waveform mismatch could not fail the test.
        assert torch.allclose(wav1, wav2)
        assert sr1 == sr2
|
def test_specaug_model():
    """ModelWithSpecaug should enable both time and frequency masking by default."""
    model = FrameLevelLinear(input_size=13, output_size=25, hidden_size=32)
    model = ModelWithSpecaug(model)
    # Idiom fix: compare boolean flags with plain truth checks instead of
    # the `== True` anti-pattern (flake8 E712).
    assert model.specaug.apply_time_mask
    assert model.specaug.apply_freq_mask
|
def _class_counter(data_dict):
counter = Counter()
for (data_id, data) in data_dict.items():
counter.update([data['class_name']])
return counter
|
@pytest.mark.corpus
def test_speech_commands():
    # Corpus root paths come from the project's .env file:
    # GSC1 is the train/valid archive, GSC1_TEST the test archive.
    env = dotenv_values()
    corpus = SpeechCommandsV1(env['GSC1'], env['GSC1_TEST'])
    all_data = corpus.all_data
    # Google Speech Commands v1 12-way task: 10 keywords + _unknown_ + _silence_.
    classes = set([value['class_name'] for (key, value) in all_data.items()])
    assert (len(classes) == 12), f'{classes}'
    (train, valid, test) = corpus.data_split
    train_class_counter = _class_counter(train)
    valid_class_counter = _class_counter(valid)
    test_class_counter = _class_counter(test)
    # Exact per-class counts pin the split logic against regressions; any
    # change to the split algorithm or class mapping will fail these.
    assert (train_class_counter == Counter({'_unknown_': 32550, 'stop': 1885, 'on': 1864, 'go': 1861, 'yes': 1860, 'no': 1853, 'right': 1852, 'up': 1843, 'down': 1842, 'left': 1839, 'off': 1839, '_silence_': 6}))
    assert (valid_class_counter == Counter({'_unknown_': 4221, 'stop': 246, 'on': 257, 'go': 260, 'yes': 261, 'no': 270, 'right': 256, 'up': 260, 'down': 264, 'left': 247, 'off': 256, '_silence_': 6}))
    assert (test_class_counter == Counter({'_unknown_': 257, 'stop': 249, 'on': 246, 'go': 251, 'yes': 256, 'no': 252, 'right': 259, 'up': 272, 'down': 253, 'left': 267, 'off': 262, '_silence_': 257}))
|
def test_tokenizer():
    """The character tokenizer must encode to a list and round-trip text;
    the default phoneme tokenizer must at least be constructible."""
    char_tokenizer = CharacterTokenizer()
    phone_tokenizer = default_phoneme_tokenizer()
    original = 'HELLO WORLD'
    encoded = char_tokenizer.encode(original)
    round_tripped = char_tokenizer.decode(encoded)
    assert isinstance(encoded, list)
    assert round_tripped == original
|
def test_version():
    """The package must expose a usable version string."""
    # FIX: the attribute was previously only accessed, never checked — the
    # test could not fail on e.g. __version__ being None or empty.
    assert isinstance(s3prl.__version__, str)
    assert s3prl.__version__
|
def is_same_vocab(vocabs_1, vocabs_2):
    """Return True iff the two vocab sequences have the same length and
    identical entries in the same order."""
    if len(vocabs_1) != len(vocabs_2):
        return False
    return all(first == second for first, second in zip(vocabs_1, vocabs_2))
|
@pytest.mark.corpus
def test_vocabulary():
    # LibriSpeech root comes from the project's .env file.
    config = dotenv_values()
    corpus = LibriSpeech(config['LibriSpeech'])
    text_list = corpus.data_dict['train-clean-100']['text_list']
    with tempfile.TemporaryDirectory() as directory:
        logging.info(directory)
        # Mirror the in-memory transcripts into a file so that both
        # generate_vocab entry points (text_list= vs text_file=) can be
        # checked for agreement.
        text_file = os.path.join(directory, 'text.txt')
        with open(text_file, 'w') as fp:
            for text in text_list:
                fp.write((text + '\n'))
        # Character vocab: both entry points agree and the tokenizer
        # round-trips SAMPLE exactly.
        char_vocabs_1 = generate_vocab('character', text_list=text_list)
        char_vocabs_2 = generate_vocab('character', text_file=text_file)
        assert isinstance(char_vocabs_1, list)
        assert isinstance(char_vocabs_2, list)
        assert is_same_vocab(char_vocabs_1, char_vocabs_2)
        char_tokenizer = load_tokenizer('character', vocab_list=char_vocabs_1)
        assert (char_tokenizer.decode(char_tokenizer.encode(SAMPLE)) == SAMPLE)
        # Word vocab (top 5000 words): same agreement + round-trip checks.
        word_vocabs_1 = generate_vocab('word', text_list=text_list, vocab_size=5000)
        word_vocabs_2 = generate_vocab('word', text_file=text_file, vocab_size=5000)
        assert isinstance(word_vocabs_1, list)
        assert isinstance(word_vocabs_2, list)
        assert is_same_vocab(word_vocabs_1, word_vocabs_2)
        word_tokenizer = load_tokenizer('word', vocab_list=word_vocabs_1)
        assert (word_tokenizer.decode(word_tokenizer.encode(SAMPLE)) == SAMPLE)
        # Subword vocab: training writes model files to disk which are then
        # loaded back via their '.model' paths (presumably a sentencepiece
        # model — confirm against generate_vocab's implementation).
        vocab_file_1 = os.path.join(directory, 'subword_1')
        vocab_file_2 = os.path.join(directory, 'subword_2')
        subword_vocabs_1 = generate_vocab('subword', text_list=text_list, vocab_size=500, output_file=vocab_file_1)
        subword_vocabs_2 = generate_vocab('subword', text_file=text_file, vocab_size=500, output_file=vocab_file_2)
        subword_tokenizer_1 = load_tokenizer('subword', vocab_file=(vocab_file_1 + '.model'))
        subword_tokenizer_2 = load_tokenizer('subword', vocab_file=(vocab_file_2 + '.model'))
        # Both trained tokenizers round-trip SAMPLE and agree on its encoding.
        assert (subword_tokenizer_1.decode(subword_tokenizer_1.encode(SAMPLE)) == SAMPLE)
        assert (subword_tokenizer_2.decode(subword_tokenizer_2.encode(SAMPLE)) == SAMPLE)
        assert (subword_tokenizer_1.encode(SAMPLE) == subword_tokenizer_2.encode(SAMPLE))
|
@pytest.mark.corpus
@pytest.mark.parametrize('use_cache', [False, True])
def test_voxceleb1sid(use_cache):
    """Smoke-test that the VoxCeleb1 speaker-id corpus can be split.

    The corpus root is read from the .env file; a missing directory is a
    configuration error, not a test failure to skip silently.
    """
    config = dotenv_values()
    corpus_root = Path(config['VoxCeleb1'])
    # Guard clause instead of if/else: fail fast on a bad configuration.
    if not corpus_root.is_dir():
        raise ValueError('Please set the VoxCeleb1 path in .env')
    train_data, valid_data, test_data = VoxCeleb1SID(corpus_root).data_split
|
def extract_single_name(name: str, ckpt: str, legacy: bool, output_dir: str, device: str, refresh: bool=False):
    """Run one upstream model on pseudo waveforms and cache its hidden states.

    Args:
        name: Hub entry name of the upstream model.
        ckpt: Checkpoint path/URL passed to the hub factory.
        legacy: Forwarded to the hub factory's ``legacy`` flag.
        output_dir: Directory where ``<name>.pt`` is written (created if absent).
        device: Torch device to run the model on.
        refresh: When True, recompute even if a cached file already exists.
    """
    out_dir = Path(output_dir)
    out_dir.mkdir(exist_ok=True, parents=True)
    output_path = str((out_dir / f'{name}.pt').resolve())
    # Skip the (expensive) forward pass when a cached result exists.
    if not refresh and Path(output_path).is_file():
        return
    model = getattr(hub, name)(ckpt=ckpt, legacy=legacy).to(device)
    model.eval()
    with torch.no_grad():
        hidden_states = model(get_pseudo_wavs(device=device))['hidden_states']
    # Detach and move to CPU so the saved file is device-independent.
    torch.save([layer.detach().cpu() for layer in hidden_states], output_path)
|
class Airfoil(BaseDataset):
    __doc__ = f'''
The NASA data set comprises different size NACA 0012 airfoils at various wind
tunnel speeds and angles of attack. The span of the airfoil and the observer
position were the same in all of the experiments.
{BASE_DATASET_DESCRIPTION}
Features:
int:
Frequency, in Hertzs
float:
Angle of attack, in degrees
float:
Chord length, in meters
float:
Free-stream velocity, in meters per second
float:
Suction side displacement thickness, in meters
Targets:
float:
Scaled sound pressure level, in decibels
Source:
https://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise
Examples:
Load in the data set::
>>> dataset = Airfoil()
>>> dataset.shape
(1503, 6)
Split the data set into features and targets, as NumPy arrays::
>>> X, y = dataset.split()
>>> X.shape, y.shape
((1503, 5), (1503,))
Perform a train/test split, also outputting NumPy arrays::
>>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
>>> X_train, X_test, y_train, y_test = train_test_split
>>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
((1181, 5), (1181,), (322, 5), (322,))
Output the underlying Pandas DataFrame::
>>> df = dataset.to_pandas()
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
'''

    # Raw data location plus feature/target column indices (columns 0-4 are
    # features, column 5 is the sound-pressure target).
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00291/airfoil_self_noise.dat'
    _features = range(5)
    _targets = [5]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        # The raw .dat file is tab-separated with no header row.
        csv_file = io.BytesIO(data)
        df = pd.read_csv(csv_file, sep='\t', header=None)
        return df
|
class BikeSharingDaily(BaseDataset):
    __doc__ = f'''
Bike sharing systems are new generation of traditional bike rentals where whole
process from membership, rental and return back has become automatic. Through these
systems, user is able to easily rent a bike from a particular position and return
back at another position. Currently, there are about over 500 bike-sharing programs
around the world which is composed of over 500 thousands bicycles. Today, there
exists great interest in these systems due to their important role in traffic,
environmental and health issues.
Apart from interesting real world applications of bike sharing systems, the
characteristics of data being generated by these systems make them attractive for
the research. Opposed to other transport services such as bus or subway, the
duration of travel, departure and arrival position is explicitly recorded in these
systems. This feature turns bike sharing system into a virtual sensor network that
can be used for sensing mobility in the city. Hence, it is expected that most of
important events in the city could be detected via monitoring these data.
{BASE_DATASET_DESCRIPTION}
Features:
instant (int):
Record index
season (int):
The season, with 1 = winter, 2 = spring, 3 = summer and 4 = autumn
yr (int):
The year, with 0 = 2011 and 1 = 2012
mnth (int):
The month, from 1 to 12 inclusive
holiday (int):
Whether day is a holiday or not, binary valued
weekday (int):
The day of the week, from 0 to 6 inclusive
workingday (int):
Working day, 1 if day is neither weekend nor holiday, otherwise 0
weathersit (int):
Weather, encoded as
1. Clear, few clouds, partly cloudy
2. Mist and cloudy, mist and broken clouds, mist and few clouds
3. Light snow, light rain and thunderstorm and scattered clouds, light rain
and scattered clouds
4. Heavy rain and ice pallets and thunderstorm and mist, or snow and fog
temp (float):
Max-min normalised temperature in Celsius, from -8 to +39
atemp (float):
Max-min normalised feeling temperature in Celsius, from -16 to +50
hum (float):
Scaled max-min normalised humidity, from 0 to 1
windspeed (float):
Scaled max-min normalised wind speed, from 0 to 1
Targets:
casual (int):
Count of casual users
registered (int):
Count of registered users
cnt (int):
Sum of casual and registered users
Source:
https://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset
Examples:
Load in the data set::
>>> dataset = BikeSharingDaily()
>>> dataset.shape
(731, 15)
Split the data set into features and targets, as NumPy arrays::
>>> X, y = dataset.split()
>>> X.shape, y.shape
((731, 12), (731, 3))
Perform a train/test split, also outputting NumPy arrays::
>>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
>>> X_train, X_test, y_train, y_test = train_test_split
>>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
((574, 12), (574, 3), (157, 12), (157, 3))
Output the underlying Pandas DataFrame::
>>> df = dataset.to_pandas()
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
'''

    # Columns 0-11 are features; the last three (casual/registered/cnt) are targets.
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00275/Bike-Sharing-Dataset.zip'
    _features = range(12)
    _targets = [12, 13, 14]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        # Unzip the archive in memory and pull out the daily-aggregated CSV.
        buffer = io.BytesIO(data)
        with zipfile.ZipFile(buffer, 'r') as zip_file:
            csv = zip_file.read('day.csv').decode('utf-8')
        csv_file = io.StringIO(csv)
        # Keep column 0 (instant) but skip column 1 (the dteday date string).
        cols = ([0] + list(range(2, 16)))
        df = pd.read_csv(csv_file, usecols=cols)
        return df
|
class BikeSharingHourly(BaseDataset):
    __doc__ = f'''
Bike sharing systems are new generation of traditional bike rentals where whole
process from membership, rental and return back has become automatic. Through these
systems, user is able to easily rent a bike from a particular position and return
back at another position. Currently, there are about over 500 bike-sharing programs
around the world which is composed of over 500 thousands bicycles. Today, there
exists great interest in these systems due to their important role in traffic,
environmental and health issues.
Apart from interesting real world applications of bike sharing systems, the
characteristics of data being generated by these systems make them attractive for
the research. Opposed to other transport services such as bus or subway, the
duration of travel, departure and arrival position is explicitly recorded in these
systems. This feature turns bike sharing system into a virtual sensor network that
can be used for sensing mobility in the city. Hence, it is expected that most of
important events in the city could be detected via monitoring these data.
{BASE_DATASET_DESCRIPTION}
Features:
instant (int):
Record index
season (int):
The season, with 1 = winter, 2 = spring, 3 = summer and 4 = autumn
yr (int):
The year, with 0 = 2011 and 1 = 2012
mnth (int):
The month, from 1 to 12 inclusive
hr (int):
The hour of the day, from 0 to 23 inclusive
holiday (int):
Whether day is a holiday or not, binary valued
weekday (int):
The day of the week, from 0 to 6 inclusive
workingday (int):
Working day, 1 if day is neither weekend nor holiday, otherwise 0
weathersit (int):
Weather, encoded as
1. Clear, few clouds, partly cloudy
2. Mist and cloudy, mist and broken clouds, mist and few clouds
3. Light snow, light rain and thunderstorm and scattered clouds, light rain
and scattered clouds
4. Heavy rain and ice pallets and thunderstorm and mist, or snow and fog
temp (float):
Max-min normalised temperature in Celsius, from -8 to +39
atemp (float):
Max-min normalised feeling temperature in Celsius, from -16 to +50
hum (float):
Scaled max-min normalised humidity, from 0 to 1
windspeed (float):
Scaled max-min normalised wind speed, from 0 to 1
Targets:
casual (int):
Count of casual users
registered (int):
Count of registered users
cnt (int):
Sum of casual and registered users
Source:
https://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset
Examples:
Load in the data set::
>>> dataset = BikeSharingHourly()
>>> dataset.shape
(17379, 16)
Split the data set into features and targets, as NumPy arrays::
>>> X, y = dataset.split()
>>> X.shape, y.shape
((17379, 13), (17379, 3))
Perform a train/test split, also outputting NumPy arrays::
>>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
>>> X_train, X_test, y_train, y_test = train_test_split
>>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
((13873, 13), (13873, 3), (3506, 13), (3506, 3))
Output the underlying Pandas DataFrame::
>>> df = dataset.to_pandas()
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
'''

    # Columns 0-12 are features (one more than the daily set: hr);
    # the last three (casual/registered/cnt) are targets.
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00275/Bike-Sharing-Dataset.zip'
    _features = range(13)
    _targets = [13, 14, 15]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        # Unzip the archive in memory and pull out the hourly CSV.
        buffer = io.BytesIO(data)
        with zipfile.ZipFile(buffer, 'r') as zip_file:
            csv = zip_file.read('hour.csv').decode('utf-8')
        csv_file = io.StringIO(csv)
        # Keep column 0 (instant) but skip column 1 (the dteday date string).
        cols = ([0] + list(range(2, 17)))
        df = pd.read_csv(csv_file, usecols=cols)
        return df
|
class Blog(BaseDataset):
    __doc__ = f'''
This data originates from blog posts. The raw HTML-documents of the blog posts were
crawled and processed. The prediction task associated with the data is the
prediction of the number of comments in the upcoming 24 hours. In order to simulate
this situation, we choose a basetime (in the past) and select the blog posts that
were published at most 72 hours before the selected base date/time. Then, we
calculate all the features of the selected blog posts from the information that was
available at the basetime, therefore each instance corresponds to a blog post. The
target is the number of comments that the blog post received in the next 24 hours
relative to the basetime.
In the train data, the basetimes were in the years 2010 and 2011. In the test data
the basetimes were in February and March 2012. This simulates the real-world
situtation in which training data from the past is available to predict events in
the future.
The train data was generated from different basetimes that may temporally overlap.
Therefore, if you simply split the train into disjoint partitions, the underlying
time intervals may overlap. Therefore, the you should use the provided, temporally
disjoint train and test splits in order to ensure that the evaluation is fair.
{BASE_DATASET_DESCRIPTION}
Features:
Features 0-49 (float):
50 features containing the average, standard deviation, minimum, maximum
and median of feature 50-59 for the source of the current blog post, by
which we mean the blog on which the post appeared. For example,
myblog.blog.org would be the source of the post
myblog.blog.org/post_2010_09_10
Feature 50 (int):
Total number of comments before basetime
Feature 51 (int):
Number of comments in the last 24 hours before the basetime
Feature 52 (int):
If T1 is the datetime 48 hours before basetime and T2 is the datetime 24
hours before basetime, then this is the number of comments in the time
period between T1 and T2
Feature 53 (int):
Number of comments in the first 24 hours after the publication of the blog
post, but before basetime
Feature 54 (int):
The difference between Feature 51 and Feature 52
Features 55-59 (int):
The same thing as Features 50-51, but for links (trackbacks) instead of
comments
Feature 60 (float):
The length of time between the publication of the blog post and basetime
Feature 61 (int):
The length of the blog post
Features 62-261 (int):
The 200 bag of words features for 200 frequent words of the text of the
blog post
Features 262-268 (int):
Binary indicators for the weekday (Monday-Sunday) of the basetime
Features 269-275 (int):
Binary indicators for the weekday (Monday-Sunday) of the date of
publication of the blog post
Feature 276 (int):
Number of parent pages: we consider a blog post P as a parent of blog post
B if B is a reply (trackback) to P
Features 277-279 (float):
Minimum, maximum and average of the number of comments the parents received
Targets:
int:
The number of comments in the next 24 hours (relative to baseline)
Source:
https://archive.ics.uci.edu/ml/datasets/BlogFeedback
Examples:
Load in the data set::
>>> dataset = Blog()
>>> dataset.shape
(52397, 281)
Split the data set into features and targets, as NumPy arrays::
>>> X, y = dataset.split()
>>> X.shape, y.shape
((52397, 279), (52397,))
Perform a train/test split, also outputting NumPy arrays::
>>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
>>> X_train, X_test, y_train, y_test = train_test_split
>>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
((41949, 279), (41949,), (10448, 279), (10448,))
Output the underlying Pandas DataFrame::
>>> df = dataset.to_pandas()
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
'''

    # Columns 0-278 are features; column 279 is the comment-count target.
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00304/BlogFeedback.zip'
    _features = range(279)
    _targets = [279]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        # Unzip the archive in memory; only the train CSV is loaded here.
        buffer = io.BytesIO(data)
        with zipfile.ZipFile(buffer, 'r') as zip_file:
            csv = zip_file.read('blogData_train.csv').decode('utf-8')
        csv_file = io.StringIO(csv)
        # The CSV has no header row.
        df = pd.read_csv(csv_file, header=None)
        return df
|
class Concrete(BaseDataset):
    __doc__ = f'''
Concrete is the most important material in civil engineering. The concrete
compressive strength is a highly nonlinear function of age and ingredients.
{BASE_DATASET_DESCRIPTION}
Features:
Cement (float):
Kg of cement in an m3 mixture
Blast Furnace Slag (float):
Kg of blast furnace slag in an m3 mixture
Fly Ash (float):
Kg of fly ash in an m3 mixture
Water (float):
Kg of water in an m3 mixture
Superplasticiser (float):
Kg of superplasticiser in an m3 mixture
Coarse Aggregate (float):
Kg of coarse aggregate in an m3 mixture
Fine Aggregate (float):
Kg of fine aggregate in an m3 mixture
Age (int):
Age in days, between 1 and 365 inclusive
Targets:
Concrete Compressive Strength (float):
Concrete compressive strength in megapascals
Source:
https://archive.ics.uci.edu/ml/datasets/Concrete+Compressive+Strength
Examples:
Load in the data set::
>>> dataset = Concrete()
>>> dataset.shape
(1030, 9)
Split the data set into features and targets, as NumPy arrays::
>>> X, y = dataset.split()
>>> X.shape, y.shape
((1030, 8), (1030,))
Perform a train/test split, also outputting NumPy arrays::
>>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
>>> X_train, X_test, y_train, y_test = train_test_split
>>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
((807, 8), (807,), (223, 8), (223,))
Output the underlying Pandas DataFrame::
>>> df = dataset.to_pandas()
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
'''

    # Columns 0-7 are mixture features; column 8 is the compressive strength.
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/concrete/compressive/Concrete_Data.xls'
    _features = range(8)
    _targets = [8]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        # The raw data ships as an Excel workbook rather than CSV.
        xls_file = io.BytesIO(data)
        df = pd.read_excel(xls_file)
        return df
|
class CPU(BaseDataset):
    __doc__ = f'''
Relative CPU Performance Data, described in terms of its cycle time, memory size,
etc.
{BASE_DATASET_DESCRIPTION}
Features:
vendor_name (string):
Name of the vendor, 30 unique values
model_name (string):
Name of the model
myct (int):
Machine cycle time in nanoseconds
mmin (int):
Minimum main memory in kilobytes
mmax (int):
Maximum main memory in kilobytes
cach (int):
Cache memory in kilobytes
chmin (int):
Minimum channels in units
chmax (int):
Maximum channels in units
Targets:
prp (int):
Published relative performance
Source:
https://archive.ics.uci.edu/ml/datasets/Computer+Hardware
Examples:
Load in the data set::
>>> dataset = CPU()
>>> dataset.shape
(209, 9)
Split the data set into features and targets, as NumPy arrays::
>>> X, y = dataset.split()
>>> X.shape, y.shape
((209, 8), (209,))
Perform a train/test split, also outputting NumPy arrays::
>>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
>>> X_train, X_test, y_train, y_test = train_test_split
>>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
((162, 8), (162,), (47, 8), (47,))
Output the underlying Pandas DataFrame::
>>> df = dataset.to_pandas()
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
'''

    # Columns 0-7 are features (note the first two are raw strings);
    # column 8 is the published relative performance target.
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/cpu-performance/machine.data'
    _features = range(8)
    _targets = [8]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        csv_file = io.BytesIO(data)
        cols = ['vendor_name', 'model_name', 'myct', 'mmin', 'mmax', 'cach', 'chmin', 'chmax', 'prp']
        # Only the first 9 columns are kept; the raw file has no header row.
        df = pd.read_csv(csv_file, header=None, usecols=range(9), names=cols)
        return df
|
class FacebookComments(BaseDataset):
    __doc__ = f'''
Instances in this dataset contain features extracted from Facebook posts. The task
associated with the data is to predict how many comments the post will receive.
{BASE_DATASET_DESCRIPTION}
Features:
page_popularity (int):
Defines the popularity of support for the source of the document
page_checkins (int):
Describes how many individuals so far visited this place. This feature is
only associated with places; e.g., some institution, place, theater, etc.
page_talking_about (int):
Defines the daily interest of individuals towards source of the
document/post. The people who actually come back to the page, after liking
the page. This include activities such as comments, likes to a post, shares
etc., by visitors to the page
page_category (int):
Defines the category of the source of the document; e.g., place,
institution, branch etc.
agg[n] for n=0..24 (float):
These features are aggreagted by page, by calculating min, max, average,
median and standard deviation of essential features
cc1 (int):
The total number of comments before selected base date/time
cc2 (int):
The number of comments in the last 24 hours, relative to base date/time
cc3 (int):
The number of comments in the last 48 to last 24 hours relative to base
date/time
cc4 (int):
The number of comments in the first 24 hours after the publication of post
but before base date/time
cc5 (int):
The difference between cc2 and cc3
base_time (int):
Selected time in order to simulate the scenario, ranges from 0 to 71
post_length (int):
Character count in the post
post_share_count (int):
This feature counts the number of shares of the post, how many people had
shared this post onto their timeline
post_promotion_status (int):
Binary feature. To reach more people with posts in News Feed, individuals
can promote their post and this feature indicates whether the post is
promoted or not
h_local (int):
This describes the hours for which we have received the target
variable/comments. Ranges from 0 to 23
day_published[n] for n=0..6 (int):
Binary feature. This represents the day (Sunday-Saturday) on which the post
was published
day[n] for n=0..6 (int):
Binary feature. This represents the day (Sunday-Saturday) on selected base
date/time
Targets:
ncomments (int):
The number of comments in the next `h_local` hours
Source:
https://archive.ics.uci.edu/ml/datasets/Facebook+Comment+Volume+Dataset
Examples:
Load in the data set::
>>> dataset = FacebookComments()
>>> dataset.shape
(199030, 54)
Split the data set into features and targets, as NumPy arrays::
>>> X, y = dataset.split()
>>> X.shape, y.shape
((199030, 54), (199030,))
Perform a train/test split, also outputting NumPy arrays::
>>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
>>> X_train, X_test, y_train, y_test = train_test_split
>>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
((159211, 54), (159211,), (39819, 54), (39819,))
Output the underlying Pandas DataFrame::
>>> df = dataset.to_pandas()
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
'''

    # NOTE(review): _features = range(54) spans columns 0-53 while _targets
    # is column 53, so the target column is also included among the features.
    # The doctest shapes above ((199030, 54) for X) match this behaviour, so
    # it may be intentional or a long-standing bug — confirm before changing.
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00363/Dataset.zip'
    _features = range(54)
    _targets = [53]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        # Unzip the archive in memory; Variant 5 of the training features is used.
        buffer = io.BytesIO(data)
        with zipfile.ZipFile(buffer, 'r') as zip_file:
            csv = zip_file.read('Dataset/Training/Features_Variant_5.csv')
        csv_file = io.BytesIO(csv)
        # Column names assembled to mirror the feature layout documented above.
        cols = (((((['page_popularity', 'page_checkins', 'page_talking_about', 'page_category'] + [f'agg{n}' for n in range(25)]) + ['cc1', 'cc2', 'cc3', 'cc4', 'cc5', 'base_time', 'post_length', 'post_share_count', 'post_promotion_status', 'h_local']) + [f'day_published{n}' for n in range(7)]) + [f'day{n}' for n in range(7)]) + ['ncomments'])
        df = pd.read_csv(csv_file, header=None, names=cols)
        return df
|
class FacebookMetrics(BaseDataset):
    __doc__ = f'''
The data is related to posts' published during the year of 2014 on the Facebook's
page of a renowned cosmetics brand.
{BASE_DATASET_DESCRIPTION}
Features:
page_likes(int):
The total number of likes of the Facebook page at the given time.
post_type (int):
The type of post. Here 0 means 'Photo', 1 means 'Status', 2 means 'Link'
and 3 means 'Video'
post_category (int):
The category of the post.
post_month (int):
The month the post was posted, from 1 to 12 inclusive.
post_weekday (int):
The day of the week the post was posted, from 1 to 7 inclusive.
post_hour (int):
The hour the post was posted, from 0 to 23 inclusive
paid (int):
Binary feature, whether the post was paid for.
Targets:
total_reach (int):
The lifetime post total reach.
total_impressions (int):
The lifetime post total impressions.
engaged_users (int):
The lifetime engaged users.
post_consumers (int):
The lifetime post consumers.
post_consumptions (int):
The lifetime post consumptions.
post_impressions (int):
The lifetime post impressions by people who liked the page.
post_reach (int):
The lifetime post reach by people who liked the page.
post_engagements (int):
The lifetime people who have liked the page and engaged with
the post.
comments (int):
The number of comments.
shares (int):
The number of shares.
total_interactions (int):
The total number of interactions
Source:
https://archive.ics.uci.edu/ml/datasets/Facebook+metrics
Examples:
Load in the data set::
>>> dataset = FacebookMetrics()
>>> dataset.shape
(500, 18)
Split the data set into features and targets, as NumPy arrays::
>>> X, y = dataset.split()
>>> X.shape, y.shape
((500, 7), (500, 11))
Perform a train/test split, also outputting NumPy arrays::
>>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
>>> X_train, X_test, y_train, y_test = train_test_split
>>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
((388, 7), (388, 11), (112, 7), (112, 11))
Output the underlying Pandas DataFrame::
>>> df = dataset.to_pandas()
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
'''

    # Columns 0-6 are features; columns 7-17 are the engagement targets.
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00368/Facebook_metrics.zip'
    _features = range(7)
    _targets = range(7, 18)

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        # Unzip the archive in memory and pull out the single CSV.
        buffer = io.BytesIO(data)
        with zipfile.ZipFile(buffer, 'r') as zip_file:
            csv = zip_file.read('dataset_Facebook.csv')
        csv_file = io.BytesIO(csv)
        cols = ['page_likes', 'post_type', 'post_category', 'post_month', 'post_weekday', 'post_hour', 'paid', 'total_reach', 'total_impressions', 'engaged_users', 'post_consumers', 'post_consumptions', 'post_impressions', 'post_reach', 'post_engagements', 'comments', 'shares', 'total_interactions']
        # The raw file is semicolon-separated; the first column is used as index.
        df = pd.read_csv(csv_file, sep=';', names=cols, header=0, index_col=0)
        # Encode post_type as integers in order of first appearance.
        # NOTE(review): the mapping depends on row order, not on a fixed
        # 'Photo'/'Status'/'Link'/'Video' order as the docstring implies — confirm.
        post_types = list(df.post_type.unique())
        df['post_type'] = df.post_type.map((lambda txt: post_types.index(txt)))
        return df
|
class FishBioconcentration(BaseDataset):
    __doc__ = f'''
This dataset contains manually-curated experimental bioconcentration factor (BCF)
for 1058 molecules (continuous values). Each row contains a molecule, identified by
a CAS number, a name (if available), and a SMILES string. Additionally, the KOW
(experimental or predicted) is reported. In this database, you will also find
Extended Connectivity Fingerprints (binary vectors of 1024 bits), to be used as
independent variables to predict the BCF.
{BASE_DATASET_DESCRIPTION}
Features:
logkow (float):
Octanol water paritioning coefficient (experimental or predicted, as
indicated by ``KOW type``
kow_exp (int):
Indicates whether ``logKOW`` is experimental or predicted, with 1 denoting
experimental and 0 denoting predicted
smiles_[idx] for idx = 0..125 (int):
Encoding of SMILES string to identify the 2D molecular structure. The
encoding is as follows, where 'x' is a padding string to ensure that all
the SMILES strings are of the same length:
- 0 = 'x'
- 1 = '#'
- 2 = '('
- 3 = ')'
- 4 = '+'
- 5 = '-'
- 6 = '/'
- 7 = '1'
- 8 = '2'
- 9 = '3'
- 10 = '4'
- 11 = '5'
- 12 = '6'
- 13 = '7'
- 14 = '8'
- 15 = '='
- 16 = '@'
- 17 = 'B'
- 18 = 'C'
- 19 = 'F'
- 20 = 'H'
- 21 = 'I'
- 22 = 'N'
- 23 = 'O'
- 24 = 'P'
- 25 = 'S'
- 26 = '['
- 27 = ''
- 28 = ']'
- 29 = 'c'
- 30 = 'i'
- 31 = 'l'
- 32 = 'n'
- 33 = 'o'
- 34 = 'r'
- 35 = 's'
Targets:
logbcf (float):
Experimental fish bioconcentration factor (logarithm form)
Source:
https://archive.ics.uci.edu/ml/datasets/QSAR+fish+bioconcentration+factor+%28BCF%29
Examples:
Load in the data set::
>>> dataset = FishBioconcentration()
>>> dataset.shape
(1054, 129)
Split the data set into features and targets, as NumPy arrays::
>>> X, y = dataset.split()
>>> X.shape, y.shape
((1054, 128), (1054,))
Perform a train/test split, also outputting NumPy arrays::
>>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
>>> X_train, X_test, y_train, y_test = train_test_split
>>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
((825, 128), (825,), (229, 128), (229,))
Output the underlying Pandas DataFrame::
>>> df = dataset.to_pandas()
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
'''

    # Columns 0-127 are features (logkow, kow_exp, smiles_*); column 128 is logbcf.
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00511/QSAR_fish_BCF.zip'
    _features = range(128)
    _targets = [128]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        # Unzip the archive in memory and pull out the single CSV.
        buffer = io.BytesIO(data)
        with zipfile.ZipFile(buffer, 'r') as zip_file:
            csv = zip_file.read('QSAR_BCF_Kow.csv')
        csv_file = io.BytesIO(csv)
        cols = ['cas', 'name', 'smiles', 'logkow', 'kow_exp', 'logbcf']
        # Drop the identifier columns ('cas', 'name') at read time.
        df = pd.read_csv(csv_file, names=cols, header=0, usecols=[col for col in cols if (col not in ['cas', 'name'])])
        df = df.dropna().reset_index(drop=True)
        # Encode the KOW type: 0 = predicted, 1 = experimental.
        kow_types = ['pred', 'exp']
        df['kow_exp'] = df.kow_exp.map((lambda txt: kow_types.index(txt)))
        # Right-pad every SMILES string with 'x' to a common length...
        max_smile = max((len(smile_string) for smile_string in df.smiles))
        df['smiles'] = [(smile_string + ('x' * (max_smile - len(smile_string)))) for smile_string in df.smiles]
        # ...then explode each character into its own smiles_<idx> column.
        # NOTE(review): these columns hold raw characters here, not the
        # integer codes listed in the class docstring — presumably the
        # integer encoding happens downstream; confirm in BaseDataset.
        smile_df = pd.DataFrame(df.smiles.map(list).values.tolist())
        smile_df.columns = pd.Index([f'smiles_{idx}' for idx in range(smile_df.shape[1])])
        df = pd.concat([df, smile_df], axis=1)
        df = df.drop(columns='smiles')
        # Reorder so the target comes last: logkow, kow_exp, smiles_*, logbcf.
        cols = ['logkow', 'kow_exp']
        cols += [f'smiles_{idx}' for idx in range(max_smile)]
        cols += ['logbcf']
        df = df[cols]
        # logkow may be read as strings; coerce it to numeric.
        df['logkow'] = pd.to_numeric(df.logkow)
        return df
|
class FishToxicity(BaseDataset):
    __doc__ = f'''
    This dataset was used to develop quantitative regression QSAR models to predict
    acute aquatic toxicity towards the fish Pimephales promelas (fathead minnow) on a
    set of 908 chemicals. LC50 data, which is the concentration that causes death in
    50% of test fish over a test duration of 96 hours, was used as model response

    {BASE_DATASET_DESCRIPTION}

    Features:
        CIC0 (float):
            Information indices
        SM1_Dz(Z) (float):
            2D matrix-based descriptors
        GATS1i (float):
            2D autocorrelations
        NdsCH (int):
            Atom-type counts
        NdssC (int):
            Atom-type counts
        MLOGP (float):
            Molecular properties

    Targets:
        LC50 (float):
            A concentration that causes death in 50% of test fish over a test
            duration of 96 hours. In -log(mol/L) units.

    Source:
        https://archive.ics.uci.edu/ml/datasets/QSAR+fish+toxicity

    Examples:
        Load in the data set::

            >>> dataset = FishToxicity()
            >>> dataset.shape
            (908, 7)

        Split the data set into features and targets, as NumPy arrays::

            >>> X, y = dataset.split()
            >>> X.shape, y.shape
            ((908, 6), (908,))

        Perform a train/test split, also outputting NumPy arrays::

            >>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
            >>> X_train, X_test, y_train, y_test = train_test_split
            >>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
            ((708, 6), (708,), (200, 6), (200,))

        Output the underlying Pandas DataFrame::

            >>> df = dataset.to_pandas()
            >>> type(df)
            <class 'pandas.core.frame.DataFrame'>
    '''

    # Remote location of the raw data file
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00504/qsar_fish_toxicity.csv'

    # Column indices of the features and of the target
    _features = range(6)
    _targets = [6]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        column_names = ['CIC0', 'SM1_Dz(Z)', 'GATS1i', 'NdsCH', 'NdssC', 'MLOGP', 'LC50']
        # The raw file is a header-less, semicolon-separated CSV
        return pd.read_csv(io.BytesIO(data), sep=';', header=None, names=column_names)
|
class ForestFire(BaseDataset):
    __doc__ = f'''
    This is a difficult regression task, where the aim is to predict the burned area of
    forest fires, in the northeast region of Portugal, by using meteorological and
    other data.

    {BASE_DATASET_DESCRIPTION}

    Features:
        X (float):
            The x-axis spatial coordinate within the Montesinho park map. Ranges from 1
            to 9.
        Y (float):
            The y-axis spatial coordinate within the Montesinho park map Ranges from 2
            to 9.
        month (int):
            Month of the year. Ranges from 0 to 11
        day (int):
            Day of the week. Ranges from 0 to 6
        FFMC (float):
            FFMC index from the FWI system. Ranges from 18.7 to 96.20
        DMC (float):
            DMC index from the FWI system. Ranges from 1.1 to 291.3
        DC (float):
            DC index from the FWI system. Ranges from 7.9 to 860.6
        ISI (float):
            ISI index from the FWI system. Ranges from 0.0 to 56.1
        temp (float):
            Temperature in Celsius degrees. Ranges from 2.2 to 33.3
        RH (float):
            Relative humidity in %. Ranges from 15.0 to 100.0
        wind (float):
            Wind speed in km/h. Ranges from 0.4 to 9.4
        rain (float):
            Outside rain in mm/m2. Ranges from 0.0 to 6.4

    Targets:
        area (float):
            The burned area of the forest (in ha). Ranges from 0.00 to 1090.84

    Notes:
        The target variable is very skewed towards 0.0, thus it may make sense to model
        with the logarithm transform.

    Source:
        https://archive.ics.uci.edu/ml/datasets/Forest+Fires

    Examples:
        Load in the data set::

            >>> dataset = ForestFire()
            >>> dataset.shape
            (517, 13)

        Split the data set into features and targets, as NumPy arrays::

            >>> X, y = dataset.split()
            >>> X.shape, y.shape
            ((517, 12), (517,))

        Perform a train/test split, also outputting NumPy arrays::

            >>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
            >>> X_train, X_test, y_train, y_test = train_test_split
            >>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
            ((401, 12), (401,), (116, 12), (116,))

        Output the underlying Pandas DataFrame::

            >>> df = dataset.to_pandas()
            >>> type(df)
            <class 'pandas.core.frame.DataFrame'>
    '''

    # Remote location of the raw data file
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/forest-fires/forestfires.csv'

    # Column indices of the features and of the target
    _features = range(12)
    _targets = [12]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        frame = pd.read_csv(io.BytesIO(data))

        # Encode month and weekday names as zero-based integers
        # (jan = 0 ... dec = 11, mon = 0 ... sun = 6)
        month_names = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
                       'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
        frame['month'] = frame.month.map(month_names.index)
        day_names = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
        frame['day'] = frame.day.map(day_names.index)
        return frame
|
class GasTurbine(BaseDataset):
    __doc__ = f'''
    Data have been generated from a sophisticated simulator of a Gas Turbines (GT),
    mounted on a Frigate characterized by a COmbined Diesel eLectric And Gas (CODLAG)
    propulsion plant type.

    The experiments have been carried out by means of a numerical simulator of a naval
    vessel (Frigate) characterized by a Gas Turbine (GT) propulsion plant. The
    different blocks forming the complete simulator (Propeller, Hull, GT, Gear Box and
    Controller) have been developed and fine tuned over the year on several similar
    real propulsion plants. In view of these observations the available data are in
    agreement with a possible real vessel.

    In this release of the simulator it is also possible to take into account the
    performance decay over time of the GT components such as GT compressor and
    turbines.

    The propulsion system behaviour has been described with this parameters:

    - Ship speed (linear function of the lever position lp).
    - Compressor degradation coefficient kMc.
    - Turbine degradation coefficient kMt.

    so that each possible degradation state can be described by a combination of this
    triple (lp,kMt,kMc).

    The range of decay of compressor and turbine has been sampled with an uniform grid
    of precision 0.001 so to have a good granularity of representation.

    In particular for the compressor decay state discretization the kMc coefficient has
    been investigated in the domain [1; 0.95], and the turbine coefficient in the
    domain [1; 0.975].

    Ship speed has been investigated sampling the range of feasible speed from 3 knots
    to 27 knots with a granularity of representation equal to tree knots.

    A series of measures (16 features) which indirectly represents of the state of the
    system subject to performance decay has been acquired and stored in the dataset
    over the parameter's space.

    {BASE_DATASET_DESCRIPTION}

    Features:
        lever_position (float)
            The position of the lever
        ship_speed (float):
            The ship speed, in knots
        shaft_torque (float):
            The shaft torque of the gas turbine, in kN m
        turbine_revolution_rate (float):
            The gas turbine rate of revolutions, in rpm
        generator_revolution_rate (float):
            The gas generator rate of revolutions, in rpm
        starboard_propeller_torque (float):
            The torque of the starboard propeller, in kN
        port_propeller_torque (float):
            The torque of the port propeller, in kN
        turbine_exit_temp (float):
            Height pressure turbine exit temperature, in celcius
        inlet_temp (float):
            Gas turbine compressor inlet air temperature, in celcius
        outlet_temp (float):
            Gas turbine compressor outlet air temperature, in celcius
        turbine_exit_pres (float):
            Height pressure turbine exit pressure, in bar
        inlet_pres (float):
            Gas turbine compressor inlet air pressure, in bar
        outlet_pres (float):
            Gas turbine compressor outlet air pressure, in bar
        exhaust_pres (float):
            Gas turbine exhaust gas pressure, in bar
        turbine_injection_control (float):
            Turbine injection control, in percent
        fuel_flow (float):
            Fuel flow, in kg/s

    Targets:
        compressor_decay (type):
            Gas turbine compressor decay state coefficient
        turbine_decay (type):
            Gas turbine decay state coefficient

    Source:
        https://archive.ics.uci.edu/ml/datasets/Condition+Based+Maintenance+of+Naval+Propulsion+Plants

    Examples:
        Load in the data set::

            >>> dataset = GasTurbine()
            >>> dataset.shape
            (11934, 18)

        Split the data set into features and targets, as NumPy arrays::

            >>> X, y = dataset.split()
            >>> X.shape, y.shape
            ((11934, 16), (11934, 2))

        Perform a train/test split, also outputting NumPy arrays::

            >>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
            >>> X_train, X_test, y_train, y_test = train_test_split
            >>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
            ((9516, 16), (9516, 2), (2418, 16), (2418, 2))

        Output the underlying Pandas DataFrame::

            >>> df = dataset.to_pandas()
            >>> type(df)
            <class 'pandas.core.frame.DataFrame'>
    '''

    # Remote location of the raw data (a zip archive)
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00316/UCI%20CBM%20Dataset.zip'

    # Column indices of the features and of the two targets
    _features = range(16)
    _targets = [16, 17]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        # The raw download is a zip archive containing a whitespace-separated
        # text file
        buffer = io.BytesIO(data)
        with zipfile.ZipFile(buffer, 'r') as zip_file:
            txt_bytes = zip_file.read('UCI CBM Dataset/data.txt')
        # Skip the first three bytes (the UTF-8 byte-order mark), then convert
        # the space-separated layout into comma-separated values.
        # NOTE(review): the exact number of spaces inside these string
        # literals looks like it may have been collapsed by formatting --
        # verify against the raw UCI file before changing them.
        txt = txt_bytes[3:].decode('utf-8').replace('\n ', '\n')
        txt = txt.replace(' ', ',')
        csv_file = io.StringIO(txt)
        # Column names, in the order the raw file stores them
        cols = ['lever_position', 'ship_speed', 'shaft_torque', 'turbine_revolution_rate', 'generator_revolution_rate', 'starboard_propeller_torque', 'port_propeller_torque', 'turbine_exit_temp', 'inlet_temp', 'outlet_temp', 'turbine_exit_pres', 'inlet_pres', 'outlet_pres', 'exhaust_pres', 'turbine_injection_control', 'fuel_flow', 'compressor_decay', 'turbine_decay']
        df = pd.read_csv(csv_file, header=None, names=cols)
        return df
|
class Nanotube(BaseDataset):
    __doc__ = f'''
    CASTEP can simulate a wide range of properties of materials proprieties using
    density functional theory (DFT). DFT is the most successful method calculates
    atomic coordinates faster than other mathematical approaches, and it also reaches
    more accurate results. The dataset is generated with CASTEP using CNT geometry
    optimization. Many CNTs are simulated in CASTEP, then geometry optimizations are
    calculated. Initial coordinates of all carbon atoms are generated randomly.
    Different chiral vectors are used for each CNT simulation.

    The atom type is selected as carbon, bond length is used as 1.42 A° (default
    value). CNT calculation parameters are used as default parameters. To finalize the
    computation, CASTEP uses a parameter named as elec_energy_tol (electrical energy
    tolerance) (default 1x10-5 eV) which represents that the change in the total energy
    from one iteration to the next remains below some tolerance value per atom for a
    few self-consistent field steps. Initial atomic coordinates (u, v, w), chiral
    vector (n, m) and calculated atomic coordinates (u, v, w) are obtained from the
    output files.

    {BASE_DATASET_DESCRIPTION}

    Features:
        Chiral indice n (int):
            n parameter of the selected chiral vector
        Chiral indice m (int):
            m parameter of the selected chiral vector
        Initial atomic coordinate u (float):
            Randomly generated u parameter of the initial atomic coordinates
            of all carbon atoms.
        Initial atomic coordinate v (float):
            Randomly generated v parameter of the initial atomic coordinates
            of all carbon atoms.
        Initial atomic coordinate w (float):
            Randomly generated w parameter of the initial atomic coordinates
            of all carbon atoms.

    Targets:
        Calculated atomic coordinates u (float):
            Calculated u parameter of the atomic coordinates of all
            carbon atoms
        Calculated atomic coordinates v (float):
            Calculated v parameter of the atomic coordinates of all
            carbon atoms
        Calculated atomic coordinates w (float):
            Calculated w parameter of the atomic coordinates of all
            carbon atoms

    Sources:
        https://archive.ics.uci.edu/ml/datasets/Carbon+Nanotubes
        https://doi.org/10.1007/s00339-016-0153-1
        https://doi.org/10.17341/gazimmfd.337642

    Examples:
        Load in the data set::

            >>> dataset = Nanotube()
            >>> dataset.shape
            (10721, 8)

        Split the data set into features and targets, as NumPy arrays::

            >>> X, y = dataset.split()
            >>> X.shape, y.shape
            ((10721, 5), (10721, 3))

        Perform a train/test split, also outputting NumPy arrays::

            >>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
            >>> X_train, X_test, y_train, y_test = train_test_split
            >>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
            ((8541, 5), (8541, 3), (2180, 5), (2180, 3))

        Output the underlying Pandas DataFrame::

            >>> df = dataset.to_pandas()
            >>> type(df)
            <class 'pandas.core.frame.DataFrame'>
    '''

    # Remote location of the raw data file
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00448/carbon_nanotubes.csv'

    # Column indices of the features and of the three targets
    _features = range(5)
    _targets = [5, 6, 7]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        # The raw file uses ';' as the field separator and ',' as the
        # decimal mark
        return pd.read_csv(io.BytesIO(data), sep=';', decimal=',')
|
class Parkinsons(BaseDataset):
    __doc__ = f'''
    This dataset is composed of a range of biomedical voice measurements from 42 people
    with early-stage Parkinson's disease recruited to a six-month trial of a
    telemonitoring device for remote symptom progression monitoring. The recordings
    were automatically captured in the patient's homes.

    Columns in the table contain subject number, subject age, subject gender, time
    interval from baseline recruitment date, motor UPDRS, total UPDRS, and 16
    biomedical voice measures. Each row corresponds to one of 5,875 voice recording
    from these individuals. The main aim of the data is to predict the motor and total
    UPDRS scores ('motor_UPDRS' and 'total_UPDRS') from the 16 voice measures.

    {BASE_DATASET_DESCRIPTION}

    Features:
        subject# (int):
            Integer that uniquely identifies each subject
        age (int):
            Subject age
        sex (int):
            Binary feature. Subject sex, with 0 being male and 1 female
        test_time (float):
            Time since recruitment into the trial. The integer part is the
            number of days since recruitment
        Jitter(%) (float):
            Measure of variation in fundamental frequency
        Jitter(Abs) (float):
            Measure of variation in fundamental frequency
        Jitter:RAP (float):
            Measure of variation in fundamental frequency
        Jitter:PPQ5 (float):
            Measure of variation in fundamental frequency
        Jitter:DDP (float):
            Measure of variation in fundamental frequency
        Shimmer (float):
            Measure of variation in amplitude
        Shimmer(dB) (float):
            Measure of variation in amplitude
        Shimmer:APQ3 (float):
            Measure of variation in amplitude
        Shimmer:APQ5 (float):
            Measure of variation in amplitude
        Shimmer:APQ11 (float):
            Measure of variation in amplitude
        Shimmer:DDA (float):
            Measure of variation in amplitude
        NHR (float):
            Measure of ratio of noise to tonal components in the voice
        HNR (float):
            Measure of ratio of noise to tonal components in the voice
        RPDE (float):
            A nonlinear dynamical complexity measure
        DFA (float):
            Signal fractal scaling exponent
        PPE (float):
            A nonlinear measure of fundamental frequency variation

    Targets:
        motor_UPDRS (float):
            Clinician's motor UPDRS score, linearly interpolated
        total_UPDRS (float):
            Clinician's total UPDRS score, linearly interpolated

    Source:
        https://archive.ics.uci.edu/ml/datasets/Parkinsons+Telemonitoring

    Examples:
        Load in the data set::

            >>> dataset = Parkinsons()
            >>> dataset.shape
            (5875, 22)

        Split the data set into features and targets, as NumPy arrays::

            >>> X, y = dataset.split()
            >>> X.shape, y.shape
            ((5875, 20), (5875, 2))

        Perform a train/test split, also outputting NumPy arrays::

            >>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
            >>> X_train, X_test, y_train, y_test = train_test_split
            >>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
            ((4659, 20), (4659, 2), (1216, 20), (1216, 2))

        Output the underlying Pandas DataFrame::

            >>> df = dataset.to_pandas()
            >>> type(df)
            <class 'pandas.core.frame.DataFrame'>
    '''

    # Remote location of the raw data file
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/parkinsons/telemonitoring/parkinsons_updrs.data'

    # Column indices of the features and of the two targets
    _features = range(20)
    _targets = [20, 21]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        frame = pd.read_csv(io.BytesIO(data), header=0)
        # Move the two UPDRS targets to the end, keeping every other
        # column (the features) in front
        feature_cols = [col for col in frame.columns if not col.endswith('UPDRS')]
        return frame[feature_cols + ['motor_UPDRS', 'total_UPDRS']]
|
class PowerPlant(BaseDataset):
    __doc__ = f'''
    The dataset contains 9568 data points collected from a Combined Cycle Power Plant
    over 6 years (2006-2011), when the power plant was set to work with full load.
    Features consist of hourly average ambient variables Temperature (T), Ambient
    Pressure (AP), Relative Humidity (RH) and Exhaust Vacuum (V) to predict the net
    hourly electrical energy output (EP) of the plant.

    A combined cycle power plant (CCPP) is composed of gas turbines (GT), steam
    turbines (ST) and heat recovery steam generators. In a CCPP, the electricity is
    generated by gas and steam turbines, which are combined in one cycle, and is
    transferred from one turbine to another. While the Vacuum is colected from and has
    effect on the Steam Turbine, he other three of the ambient variables effect the GT
    performance.

    For comparability with our baseline studies, and to allow 5x2 fold statistical
    tests be carried out, we provide the data shuffled five times. For each shuffling
    2-fold CV is carried out and the resulting 10 measurements are used for statistical
    testing.

    {BASE_DATASET_DESCRIPTION}

    Features:
        AT (float):
            Hourly average temperature in Celsius, ranges from 1.81 to 37.11
        V (float):
            Hourly average exhaust vacuum in cm Hg, ranges from 25.36 to 81.56
        AP (float):
            Hourly average ambient pressure in millibar, ranges from 992.89
            to 1033.30
        RH (float):
            Hourly average relative humidity in percent, ranges from 25.56 to 100.16

    Targets:
        PE (float):
            Net hourly electrical energy output in MW, ranges from 420.26 to 495.76

    Source:
        https://archive.ics.uci.edu/ml/datasets/Combined+Cycle+Power+Plant

    Examples:
        Load in the data set::

            >>> dataset = PowerPlant()
            >>> dataset.shape
            (9568, 5)

        Split the data set into features and targets, as NumPy arrays::

            >>> X, y = dataset.split()
            >>> X.shape, y.shape
            ((9568, 4), (9568,))

        Perform a train/test split, also outputting NumPy arrays::

            >>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
            >>> X_train, X_test, y_train, y_test = train_test_split
            >>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
            ((7633, 4), (7633,), (1935, 4), (1935,))

        Output the underlying Pandas DataFrame::

            >>> df = dataset.to_pandas()
            >>> type(df)
            <class 'pandas.core.frame.DataFrame'>
    '''

    # Remote location of the raw data (a zip archive)
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00294/CCPP.zip'

    # Column indices of the features and of the target
    _features = range(4)
    _targets = [4]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        # The raw download is a zip archive containing an Excel workbook
        with zipfile.ZipFile(io.BytesIO(data), 'r') as archive:
            workbook = archive.read('CCPP/Folds5x2_pp.xlsx')
        return pd.read_excel(io.BytesIO(workbook))
|
class Protein(BaseDataset):
    __doc__ = f'''
    This is a data set of Physicochemical Properties of Protein Tertiary Structure. The
    data set is taken from CASP 5-9. There are 45730 decoys and size varying from 0 to
    21 armstrong.

    {BASE_DATASET_DESCRIPTION}

    Features:
        F1 (float):
            Total surface area
        F2 (float):
            Non polar exposed area
        F3 (float):
            Fractional area of exposed non polar residue
        F4 (float):
            Fractional area of exposed non polar part of residue
        F5 (float):
            Molecular mass weighted exposed area
        F6 (float):
            Average deviation from standard exposed area of residue
        F7 (float):
            Euclidean distance
        F8 (float):
            Secondary structure penalty
        F9 (float):
            Spacial Distribution constraints (N,K Value)

    Targets:
        RMSD (float):
            Size of the residue

    Source:
        https://archive.ics.uci.edu/ml/datasets/Physicochemical+Properties+of+Protein+Tertiary+Structure

    Examples:
        Load in the data set::

            >>> dataset = Protein()
            >>> dataset.shape
            (45730, 10)

        Split the data set into features and targets, as NumPy arrays::

            >>> X, y = dataset.split()
            >>> X.shape, y.shape
            ((45730, 9), (45730,))

        Perform a train/test split, also outputting NumPy arrays::

            >>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
            >>> X_train, X_test, y_train, y_test = train_test_split
            >>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
            ((36580, 9), (36580,), (9150, 9), (9150,))

        Output the underlying Pandas DataFrame::

            >>> df = dataset.to_pandas()
            >>> type(df)
            <class 'pandas.core.frame.DataFrame'>
    '''

    # Remote location of the raw data file
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00265/CASP.csv'

    # Column indices of the features and of the target
    _features = range(9)
    _targets = [9]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        frame = pd.read_csv(io.BytesIO(data))
        # Reorder so the nine features come first and the target last
        ordered = [f'F{idx}' for idx in range(1, 10)] + ['RMSD']
        return frame[ordered]
|
class Servo(BaseDataset):
    __doc__ = f'''
    Data was from a simulation of a servo system.

    Ross Quinlan:

    This data was given to me by Karl Ulrich at MIT in 1986. I didn't record his
    description at the time, but here's his subsequent (1992) recollection:

    "I seem to remember that the data was from a simulation of a servo system involving
    a servo amplifier, a motor, a lead screw/nut, and a sliding carriage of some sort.
    It may have been on of the translational axes of a robot on the 9th floor of the AI
    lab. In any case, the output value is almost certainly a rise time, or the time
    required for the system to respond to a step change in a position set point."

    (Quinlan, ML'93)

    "This is an interesting collection of data provided by Karl Ulrich. It covers an
    extremely non-linear phenomenon - predicting the rise time of a servomechanism in
    terms of two (continuous) gain settings and two (discrete) choices of mechanical
    linkages."

    {BASE_DATASET_DESCRIPTION}

    Features:
        motor (int):
            Motor, ranges from 0 to 4 inclusive
        screw (int):
            Screw, ranges from 0 to 4 inclusive
        pgain (int):
            PGain, ranges from 3 to 6 inclusive
        vgain (int):
            VGain, ranges from 1 to 5 inclusive

    Targets:
        class (float):
            Class values, ranges from 0.13 to 7.10 inclusive

    Source:
        https://archive.ics.uci.edu/ml/datasets/Servo

    Examples:
        Load in the data set::

            >>> dataset = Servo()
            >>> dataset.shape
            (167, 5)

        Split the data set into features and targets, as NumPy arrays::

            >>> X, y = dataset.split()
            >>> X.shape, y.shape
            ((167, 4), (167,))

        Perform a train/test split, also outputting NumPy arrays::

            >>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
            >>> X_train, X_test, y_train, y_test = train_test_split
            >>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
            ((131, 4), (131,), (36, 4), (36,))

        Output the underlying Pandas DataFrame::

            >>> df = dataset.to_pandas()
            >>> type(df)
            <class 'pandas.core.frame.DataFrame'>
    '''

    # Remote location of the raw data file
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/servo/servo.data'

    # Column indices of the features and of the target
    _features = range(4)
    _targets = [4]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        column_names = ['motor', 'screw', 'pgain', 'vgain', 'class']
        frame = pd.read_csv(io.BytesIO(data), names=column_names)

        # Encode the categorical letter codes as zero-based integers
        letter_codes = ['A', 'B', 'C', 'D', 'E']
        for column in ('motor', 'screw'):
            frame[column] = frame[column].map(letter_codes.index)
        return frame
|
class SolarFlare(BaseDataset):
    __doc__ = f'''
    Each class attribute counts the number of solar flares of a certain class that
    occur in a 24 hour period.

    The database contains 3 potential classes, one for the number of times a certain
    type of solar flare occured in a 24 hour period.

    Each instance represents captured features for 1 active region on the sun.

    The data are divided into two sections. The second section (flare.data2) has had
    much more error correction applied to the it, and has consequently been treated as
    more reliable.

    {BASE_DATASET_DESCRIPTION}

    Features:
        class (int):
            Code for class (modified Zurich class). Ranges from 0 to 6 inclusive
        spot_size (int):
            Code for largest spot size. Ranges from 0 to 5 inclusive
        spot_distr (int):
            Code for spot distribution. Ranges from 0 to 3 inclusive
        activity (int):
            Binary feature indicating 1 = reduced and 2 = unchanged
        evolution (int):
            0 = decay, 1 = no growth and 2 = growth
        flare_activity (int):
            Previous 24 hour flare activity code, where 0 = nothing as big as an M1, 1
            = one M1 and 2 = more activity than one M1
        is_complex (int):
            Binary feature indicating historically complex
        became_complex (int):
            Binary feature indicating whether the region became historically complex on
            this pass across the sun's disk
        large (int):
            Binary feature, indicating whether area is large
        large_spot (int):
            Binary feature, indicating whether the area of the largest spot is greater
            than 5

    Targets:
        C-class (int):
            C-class flares production by this region in the following 24 hours (common
            flares)
        M-class (int):
            M-class flares production by this region in the following 24 hours
            (moderate flares)
        X-class (int):
            X-class flares production by this region in the following 24 hours (severe
            flares)

    Source:
        https://archive.ics.uci.edu/ml/datasets/Solar+Flare

    Examples:
        Load in the data set::

            >>> dataset = SolarFlare()
            >>> dataset.shape
            (1066, 13)

        Split the data set into features and targets, as NumPy arrays::

            >>> X, y = dataset.split()
            >>> X.shape, y.shape
            ((1066, 10), (1066, 3))

        Perform a train/test split, also outputting NumPy arrays::

            >>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
            >>> X_train, X_test, y_train, y_test = train_test_split
            >>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
            ((837, 10), (837, 3), (229, 10), (229, 3))

        Output the underlying Pandas DataFrame::

            >>> df = dataset.to_pandas()
            >>> type(df)
            <class 'pandas.core.frame.DataFrame'>
    '''

    # Remote location of the raw data file
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/solar-flare/flare.data2'

    # Column indices of the features and of the three targets
    _features = range(10)
    _targets = range(10, 13)

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        column_names = ['class', 'spot_size', 'spot_distr', 'activity', 'evolution', 'flare_activity', 'is_complex', 'became_complex', 'large', 'large_spot', 'C-class', 'M-class', 'X-class']
        # The first line of the raw file is a section marker, not data
        frame = pd.read_csv(io.BytesIO(data), sep=' ', skiprows=[0], names=column_names)

        # Encode each categorical letter code as a zero-based integer
        categorical_codes = {
            'class': ['A', 'B', 'C', 'D', 'E', 'F', 'H'],
            'spot_size': ['X', 'R', 'S', 'A', 'H', 'K'],
            'spot_distr': ['X', 'O', 'I', 'C'],
        }
        for column, codes in categorical_codes.items():
            frame[column] = frame[column].map(codes.index)
        return frame
|
class SpaceShuttle(BaseDataset):
    __doc__ = f'''
    The motivation for collecting this database was the explosion of the USA Space
    Shuttle Challenger on 28 January, 1986. An investigation ensued into the
    reliability of the shuttle's propulsion system. The explosion was eventually traced
    to the failure of one of the three field joints on one of the two solid booster
    rockets. Each of these six field joints includes two O-rings, designated as primary
    and secondary, which fail when phenomena called erosion and blowby both occur.

    The night before the launch a decision had to be made regarding launch safety. The
    discussion among engineers and managers leading to this decision included concern
    that the probability of failure of the O-rings depended on the temperature t at
    launch, which was forecase to be 31 degrees F. There are strong engineering reasons
    based on the composition of O-rings to support the judgment that failure
    probability may rise monotonically as temperature drops. One other variable, the
    pressure s at which safety testing for field join leaks was performed, was
    available, but its relevance to the failure process was unclear.

    Draper's paper includes a menacing figure graphing the number of field joints
    experiencing stress vs. liftoff temperature for the 23 shuttle flights previous to
    the Challenger disaster. No previous liftoff temperature was under 53 degrees F.
    Although tremendous extrapolation must be done from the given data to assess risk
    at 31 degrees F, it is obvious even to the layman "to foresee the unacceptably high
    risk created by launching at 31 degrees F." For more information, see Draper (1993)
    or the other previous analyses.

    The task is to predict the number of O-rings that will experience thermal distress
    for a given flight when the launch temperature is below freezing.

    {BASE_DATASET_DESCRIPTION}

    Features:
        idx (int):
            Temporal order of flight
        temp (int):
            Launch temperature in Fahrenheit
        pres (int):
            Leak-check pressure in psi
        n_risky_rings (int):
            Number of O-rings at risk on a given flight

    Targets:
        n_distressed_rings (int):
            Number of O-rings experiencing thermal distress

    Source:
        https://archive.ics.uci.edu/ml/datasets/Challenger+USA+Space+Shuttle+O-Ring

    Examples:
        Load in the data set::

            >>> dataset = SpaceShuttle()
            >>> dataset.shape
            (23, 5)

        Split the data set into features and targets, as NumPy arrays::

            >>> X, y = dataset.split()
            >>> X.shape, y.shape
            ((23, 4), (23,))

        Perform a train/test split, also outputting NumPy arrays::

            >>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
            >>> X_train, X_test, y_train, y_test = train_test_split
            >>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
            ((20, 4), (20,), (3, 4), (3,))

        Output the underlying Pandas DataFrame::

            >>> df = dataset.to_pandas()
            >>> type(df)
            <class 'pandas.core.frame.DataFrame'>
    '''

    # Remote location of the raw data file
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/space-shuttle/o-ring-erosion-only.data'

    # Column indices of the features and of the target
    _features = range(4)
    _targets = [4]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        # Collapse runs of spaces so the file parses as single-space separated
        text = re.sub(' +', ' ', data.decode('utf-8'))
        raw_order = ['n_risky_rings', 'n_distressed_rings', 'temp', 'pres', 'idx']
        frame = pd.read_csv(io.StringIO(text), sep=' ', names=raw_order)
        # Reorder so the features come first and the target last
        return frame[['idx', 'temp', 'pres', 'n_risky_rings', 'n_distressed_rings']]
|
class Stocks(BaseDataset):
    __doc__ = f'''
There are three disadvantages of weighted scoring stock selection models. First,
they cannot identify the relations between weights of stock-picking concepts and
performances of portfolios. Second, they cannot systematically discover the optimal
combination for weights of concepts to optimize the performances. Third, they are
unable to meet various investors' preferences.
This study aims to more efficiently construct weighted scoring stock selection
models to overcome these disadvantages. Since the weights of stock-picking concepts
in a weighted scoring stock selection model can be regarded as components in a
mixture, we used the simplex centroid mixture design to obtain the experimental
sets of weights. These sets of weights are simulated with US stock market
historical data to obtain their performances. Performance prediction models were
built with the simulated performance data set and artificial neural networks.
Furthermore, the optimization models to reflect investors' preferences were built
up, and the performance prediction models were employed as the kernel of the
optimization models so that the optimal solutions can now be solved with
optimization techniques. The empirical values of the performances of the optimal
weighting combinations generated by the optimization models showed that they can
meet various investors' preferences and outperform those of S&P's 500 not only
during the training period but also during the testing period.
{BASE_DATASET_DESCRIPTION}
Features:
    bp (float):
        Large B/P
    roe (float):
        Large ROE
    sp (float):
        Large S/P
    return_rate (float):
        Large return rate in the last quarter
    market_value (float):
        Large market value
    small_risk (float):
        Small systematic risk
    orig_annual_return (float):
        Annual return
    orig_excess_return (float):
        Excess return
    orig_risk (float):
        Systematic risk
    orig_total_risk (float):
        Total risk
    orig_abs_win_rate (float):
        Absolute win rate
    orig_rel_win_rate (float):
        Relative win rate
Targets:
    annual_return (float):
        Annual return
    excess_return (float):
        Excess return
    risk (float):
        Systematic risk
    total_risk (float):
        Total risk
    abs_win_rate (float):
        Absolute win rate
    rel_win_rate (float):
        Relative win rate
Source:
    https://archive.ics.uci.edu/ml/datasets/Stock+portfolio+performance
Examples:
    Load in the data set::
        >>> dataset = Stocks()
        >>> dataset.shape
        (252, 19)
    Split the data set into features and targets, as NumPy arrays::
        >>> X, y = dataset.split()
        >>> X.shape, y.shape
        ((252, 12), (252, 6))
    Perform a train/test split, also outputting NumPy arrays::
        >>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
        >>> X_train, X_test, y_train, y_test = train_test_split
        >>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
        ((197, 12), (197, 6), (55, 12), (55, 6))
    Output the underlying Pandas DataFrame::
        >>> df = dataset.to_pandas()
        >>> type(df)
        <class 'pandas.core.frame.DataFrame'>
'''

    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00390/stock%20portfolio%20performance%20data%20set.xlsx'
    # NOTE(review): 'id' is column 0 of the prepared frame, so range(12)
    # includes it and range(12, 18) starts at orig_rel_win_rate rather than
    # annual_return — verify against BaseDataset.split whether the 'id'
    # column is dropped upstream; otherwise these look off by one.
    _features = range(12)
    _targets = range(12, 18)

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        column_names = [
            'id', 'bp', 'roe', 'sp', 'return_rate', 'market_value',
            'small_risk', 'orig_annual_return', 'orig_excess_return',
            'orig_risk', 'orig_total_risk', 'orig_abs_win_rate',
            'orig_rel_win_rate', 'annual_return', 'excess_return', 'risk',
            'total_risk', 'abs_win_rate', 'rel_win_rate',
        ]
        period_sheets = ['1st period', '2nd period', '3rd period', '4th period']
        # Each sheet holds one period; stack them into a single frame.
        sheet_frames = pd.read_excel(
            io.BytesIO(data),
            sheet_name=period_sheets,
            names=column_names,
            skiprows=[0, 1],
            header=None,
        )
        frames = [sheet_frames[name] for name in period_sheets]
        return pd.concat(frames, ignore_index=True)
|
class Superconductivity(BaseDataset):
    __doc__ = f'''
This dataset contains data on 21,263 superconductors and their relevant features.
The goal here is to predict the critical temperature based on the features
extracted.
{BASE_DATASET_DESCRIPTION}
Features:
    - number_of_elements (int)
    - mean_atomic_mass (float)
    - wtd_mean_atomic_mass (float)
    - gmean_atomic_mass (float)
    - wtd_gmean_atomic_mass (float)
    - entropy_atomic_mass (float)
    - wtd_entropy_atomic_mass (float)
    - range_atomic_mass (float)
    - wtd_range_atomic_mass (float)
    - std_atomic_mass (float)
    - wtd_std_atomic_mass (float)
    - mean_fie (float)
    - wtd_mean_fie (float)
    - gmean_fie (float)
    - wtd_gmean_fie (float)
    - entropy_fie (float)
    - wtd_entropy_fie (float)
    - range_fie (float)
    - wtd_range_fie (float)
    - std_fie (float)
    - wtd_std_fie (float)
    - mean_atomic_radius (float)
    - wtd_mean_atomic_radius (float)
    - gmean_atomic_radius (float)
    - wtd_gmean_atomic_radius (float)
    - entropy_atomic_radius (float)
    - wtd_entropy_atomic_radius (float)
    - range_atomic_radius (float)
    - wtd_range_atomic_radius (float)
    - std_atomic_radius (float)
    - wtd_std_atomic_radius (float)
    - mean_Density (float)
    - wtd_mean_Density (float)
    - gmean_Density (float)
    - wtd_gmean_Density (float)
    - entropy_Density (float)
    - wtd_entropy_Density (float)
    - range_Density (float)
    - wtd_range_Density (float)
    - std_Density (float)
    - wtd_std_Density (float)
    - mean_ElectronAffinity (float)
    - wtd_mean_ElectronAffinity (float)
    - gmean_ElectronAffinity (float)
    - wtd_gmean_ElectronAffinity (float)
    - entropy_ElectronAffinity (float)
    - wtd_entropy_ElectronAffinity (float)
    - range_ElectronAffinity (float)
    - wtd_range_ElectronAffinity (float)
    - std_ElectronAffinity (float)
    - wtd_std_ElectronAffinity (float)
    - mean_FusionHeat (float)
    - wtd_mean_FusionHeat (float)
    - gmean_FusionHeat (float)
    - wtd_gmean_FusionHeat (float)
    - entropy_FusionHeat (float)
    - wtd_entropy_FusionHeat (float)
    - range_FusionHeat (float)
    - wtd_range_FusionHeat (float)
    - std_FusionHeat (float)
    - wtd_std_FusionHeat (float)
    - mean_ThermalConductivity (float)
    - wtd_mean_ThermalConductivity (float)
    - gmean_ThermalConductivity (float)
    - wtd_gmean_ThermalConductivity (float)
    - entropy_ThermalConductivity (float)
    - wtd_entropy_ThermalConductivity (float)
    - range_ThermalConductivity (float)
    - wtd_range_ThermalConductivity (float)
    - std_ThermalConductivity (float)
    - wtd_std_ThermalConductivity (float)
    - mean_Valence (float)
    - wtd_mean_Valence (float)
    - gmean_Valence (float)
    - wtd_gmean_Valence (float)
    - entropy_Valence (float)
    - wtd_entropy_Valence (float)
    - range_Valence (float)
    - wtd_range_Valence (float)
    - std_Valence (float)
    - wtd_std_Valence (float)
Targets:
    - critical_temp (float)
Source:
    https://archive.ics.uci.edu/ml/datasets/Superconductivty+Data
Examples:
    Load in the data set::
        >>> dataset = Superconductivity()
        >>> dataset.shape
        (21263, 82)
    Split the data set into features and targets, as NumPy arrays::
        >>> X, y = dataset.split()
        >>> X.shape, y.shape
        ((21263, 81), (21263,))
    Perform a train/test split, also outputting NumPy arrays::
        >>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
        >>> X_train, X_test, y_train, y_test = train_test_split
        >>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
        ((17004, 81), (17004,), (4259, 81), (4259,))
    Output the underlying Pandas DataFrame::
        >>> df = dataset.to_pandas()
        >>> type(df)
        <class 'pandas.core.frame.DataFrame'>
'''

    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00464/superconduct.zip'
    # Columns 0..80 are features; column 81 is critical_temp.
    _features = range(81)
    _targets = [81]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        # The payload is a zip archive; only 'train.csv' inside it is used.
        with zipfile.ZipFile(io.BytesIO(data), 'r') as archive:
            with archive.open('train.csv') as csv_file:
                return pd.read_csv(csv_file)
|
class TehranHousing(BaseDataset):
    __doc__ = f'''
Data set includes construction cost, sale prices, project variables, and economic
variables corresponding to real estate single-family residential apartments in
Tehran, Iran.
{BASE_DATASET_DESCRIPTION}
Features:
    start_year (int):
        Start year in the Persian calendar
    start_quarter (int)
        Start quarter in the Persian calendar
    completion_year (int)
        Completion year in the Persian calendar
    completion_quarter (int)
        Completion quarter in the Persian calendar
    V-1..V-8 (floats):
        Project physical and financial variables
    V-11-1..29-1 (floats):
        Economic variables and indices in time, lag 1
    V-11-2..29-2 (floats):
        Economic variables and indices in time, lag 2
    V-11-3..29-3 (floats):
        Economic variables and indices in time, lag 3
    V-11-4..29-4 (floats):
        Economic variables and indices in time, lag 4
    V-11-5..29-5 (floats):
        Economic variables and indices in time, lag 5
Targets:
    construction_cost (float)
    sale_price (float)
Source:
    https://archive.ics.uci.edu/ml/datasets/Residential+Building+Data+Set
Examples:
    Load in the data set::
        >>> dataset = TehranHousing()
        >>> dataset.shape
        (371, 109)
    Split the data set into features and targets, as NumPy arrays::
        >>> X, y = dataset.split()
        >>> X.shape, y.shape
        ((371, 107), (371, 2))
    Perform a train/test split, also outputting NumPy arrays::
        >>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
        >>> X_train, X_test, y_train, y_test = train_test_split
        >>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
        ((288, 107), (288, 2), (83, 107), (83, 2))
    Output the underlying Pandas DataFrame::
        >>> df = dataset.to_pandas()
        >>> type(df)
        <class 'pandas.core.frame.DataFrame'>
'''

    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00437/Residential-Building-Data-Set.xlsx'
    # Columns 0..106 are features; the last two are the targets.
    _features = range(107)
    _targets = [107, 108]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        col_names = ['start_year', 'start_quarter', 'completion_year', 'completion_quarter']
        col_names += [f'V-{i}' for i in range(1, 9)]
        # Economic variables V-11..V-29, grouped lag-major (all of lag 1,
        # then all of lag 2, and so on up to lag 5).
        for lag in range(1, 6):
            col_names += [f'V-{i}-{lag}' for i in range(11, 30)]
        col_names += ['construction_cost', 'sale_price']
        return pd.read_excel(io.BytesIO(data), skiprows=[0, 1], names=col_names)
|
class Yacht(BaseDataset):
    __doc__ = f'''
Prediction of residuary resistance of sailing yachts at the initial design stage is
of a great value for evaluating the ship's performance and for estimating the
required propulsive power. Essential inputs include the basic hull dimensions and
the boat velocity.
The Delft data set comprises 251 full-scale experiments, which were performed at
the Delft Ship Hydromechanics Laboratory for that purpose.
These experiments include 22 different hull forms, derived from a parent form
closely related to the "Standfast 43" designed by Frans Maas.
{BASE_DATASET_DESCRIPTION}
Features:
    pos (float):
        Longitudinal position of the center of buoyancy, adimensional
    prismatic (float):
        Prismatic coefficient, adimensional
    displacement (float):
        Length-displacement ratio, adimensional
    beam_draught (float):
        Beam-draught ratio, adimensional
    length_beam (float):
        Length-beam ratio, adimensional
    froude_no (float):
        Froude number, adimensional
Targets:
    resistance (float):
        Residuary resistance per unit weight of displacement, adimensional
Source:
    https://archive.ics.uci.edu/ml/datasets/Yacht+Hydrodynamics
Examples:
    Load in the data set::
        >>> dataset = Yacht()
        >>> dataset.shape
        (251, 7)
    Split the data set into features and targets, as NumPy arrays::
        >>> X, y = dataset.split()
        >>> X.shape, y.shape
        ((251, 6), (251,))
    Perform a train/test split, also outputting NumPy arrays::
        >>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
        >>> X_train, X_test, y_train, y_test = train_test_split
        >>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
        ((196, 6), (196,), (55, 6), (55,))
    Output the underlying Pandas DataFrame::
        >>> df = dataset.to_pandas()
        >>> type(df)
        <class 'pandas.core.frame.DataFrame'>
'''

    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00243/yacht_hydrodynamics.data'
    # Columns 0..5 are the hull/velocity features; column 6 is resistance.
    _features = range(6)
    _targets = [6]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Prepare the data set.

        Args:
            data (bytes): The raw data

        Returns:
            Pandas dataframe: The prepared data
        """
        col_names = [
            'pos', 'prismatic', 'displacement', 'beam_draught',
            'length_beam', 'froude_no', 'resistance',
        ]
        # NOTE(review): on_bad_lines='skip' silently drops rows whose field
        # count differs (the raw file has irregular spacing) — confirm the
        # resulting row count matches the documented (251, 7) shape.
        buffer = io.BytesIO(data)
        return pd.read_csv(buffer, header=None, sep=' ', names=col_names, on_bad_lines='skip')
|
def quantile_loss(predictions: np.ndarray, targets: np.ndarray, quantile: float) -> float:
    """Quantile loss function.

    Args:
        predictions (sequence of floats):
            Model predictions, of shape [n_samples,].
        targets (sequence of floats):
            Target values, of shape [n_samples,].
        quantile (float):
            The quantile we are seeking. Must be between 0 and 1.

    Returns:
        float: The quantile loss.
    """
    residuals = np.asarray(targets) - np.asarray(predictions)
    # Pinball loss: under-predictions are weighted by `quantile`,
    # over-predictions by `1 - quantile`.
    under = np.maximum(residuals, 0)
    over = np.maximum(-residuals, 0)
    loss = (quantile * under + (1 - quantile) * over).mean()
    return float(loss)
|
def smooth_quantile_loss(predictions: np.ndarray, targets: np.ndarray, quantile: float, alpha: float = 0.4) -> float:
    """The smooth quantile loss function from [1].

    Args:
        predictions (sequence of floats):
            Model predictions, of shape [n_samples,].
        targets (sequence of floats):
            Target values, of shape [n_samples,].
        quantile (float):
            The quantile we are seeking. Must be between 0 and 1.
        alpha (float, optional):
            Smoothing parameter. Defaults to 0.4.

    Returns:
        float: The smooth quantile loss.

    Sources:
        [1]: Songfeng Zheng (2011). Gradient Descent Algorithms for Quantile
            Regression With Smooth Approximation. International Journal of
            Machine Learning and Cybernetics.
    """
    residuals = np.asarray(targets) - np.asarray(predictions)
    # log(1 + exp(-r / alpha)) smooths the pinball hinge at r = 0; `alpha`
    # controls how closely it follows the non-smooth loss.
    smooth_term = alpha * np.log(1 + np.exp((-residuals) / alpha))
    per_sample = quantile * residuals + smooth_term
    return float(np.mean(per_sample))
|
class Model(Protocol):
    """Structural interface for the predictive models used in this package.

    Any object implementing methods with these signatures satisfies the
    protocol; no inheritance from this class is required.
    """

    def __init__(self, *args, **kwargs):
        ...

    def predict(self, X: np.ndarray, **kwargs) -> Tuple[(Union[(float, np.ndarray)], np.ndarray)]:
        # Returns a pair — presumably predictions plus uncertainty/quantile
        # information; confirm against concrete implementations.
        ...

    def fit(self, X: np.ndarray, y: np.ndarray, **kwargs):
        ...

    def __call__(self, X: np.ndarray, **kwargs) -> Tuple[(Union[(float, np.ndarray)], np.ndarray)]:
        # Same return contract as `predict`.
        ...
|
class BaseTreeQuantileRegressor(BaseDecisionTree):
    def predict(self, X: np.ndarray, uncertainty: Optional[float]=None, quantiles: Optional[np.ndarray]=None, check_input: bool=True) -> Union[(np.ndarray, Tuple[(np.ndarray, np.ndarray)])]:
        """Predict regression value for X.

        Args:
            X (array-like or sparse matrix):
                The input samples, of shape [n_samples, n_features]. Internally,
                it will be converted to `dtype=np.float32` and if a sparse matrix
                is provided to a sparse `csr_matrix`.
            uncertainty (float or None, optional):
                Value ranging from 0 to 1. If None then no prediction intervals
                will be returned. Defaults to None.
            quantiles (sequence of floats or None, optional):
                List of quantiles to output, as an alternative to the
                `uncertainty` argument, and will not be used if that argument is
                set. If None then `uncertainty` is used. Defaults to None.
            check_input (boolean, optional):
                Allow to bypass several input checking. Don't use this parameter
                unless you know what you do. Defaults to True.

        Returns:
            Array or pair of arrays:
                Either array with predictions, of shape [n_samples,], or a pair
                of arrays with the first one being the predictions and the second
                one being the desired quantiles/intervals, of shape
                [n_samples, 2] if `uncertainty` is not None, and
                [n_samples, n_quantiles] if `quantiles` is not None.
        """
        X = check_array(X, dtype=np.float32, accept_sparse='csc')
        preds = super().predict(X, check_input=check_input)
        if uncertainty is None and quantiles is None:
            return preds
        if uncertainty is not None:
            # Symmetric (1 - uncertainty) prediction interval.
            quantiles = [uncertainty / 2, 1 - uncertainty / 2]
        else:
            # BUGFIX: the previous `list(quantiles or [])` raised
            # "truth value of an array is ambiguous" for multi-element
            # ndarrays, which the annotation explicitly allows. `quantiles`
            # is known to be non-None in this branch, so convert directly.
            quantiles = list(quantiles)
        X_leaves = self.apply(X)
        quantile_vals = np.empty((X.shape[0], len(quantiles)))
        for leaf in np.unique(X_leaves):
            # Hoisted out of the quantile loop: both masks depend only on
            # the leaf, not on the quantile.
            X_leaf = (X_leaves == leaf)
            y_leaf = self.y_train_[self.y_train_leaves_ == leaf]
            for idx, quantile in enumerate(quantiles):
                quantile_vals[X_leaf, idx] = weighted_percentile(y_leaf, quantile)
        return (preds, quantile_vals)

    def fit(self, X: np.ndarray, y: np.ndarray, sample_weight: Optional[np.ndarray]=None, check_input: bool=True):
        """Build a decision tree classifier from the training set (X, y).

        Args:
            X (array-like or sparse matrix)
                The training input samples, of shape [n_samples, n_features].
                Internally, it will be converted to `dtype=np.float32` and if a
                sparse matrix is provided to a sparse `csc_matrix`.
            y (array-like):
                The target values (class labels) as integers or strings, of
                shape [n_samples] or [n_samples, n_outputs].
            sample_weight (array-like or None, optional):
                Sample weights of shape = [n_samples]. If None, then samples are
                equally weighted. Splits that would create child nodes with net
                zero or negative weight are ignored while searching for a split
                in each node. Splits are also ignored if they would result in
                any single class carrying a negative weight in either child
                node. Defaults to None.
            check_input (boolean, optional):
                Allow to bypass several input checking. Don't use this parameter
                unless you know what you do. Defaults to True.
        """
        y = np.asarray(y)
        # Flatten a single-output column vector to 1-D before validation.
        if np.ndim(y) == 2 and y.shape[1] == 1:
            y = np.ravel(y)
        X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float32, multi_output=False)
        super().fit(X, y, sample_weight=sample_weight, check_input=check_input)
        # Cache training targets and their leaf assignments so `predict` can
        # compute per-leaf quantiles later.
        self.y_train_ = y
        self.y_train_leaves_ = self.tree_.apply(X)
        return self
|
class QuantileRegressionTree(DecisionTreeRegressor, BaseTreeQuantileRegressor):
    "A decision tree regressor that provides quantile estimates.\n\n Args:\n criterion (string, optional):\n The function to measure the quality of a split. Supported criteria are\n 'squared_error' for the mean squared error, which is equal to variance\n reduction as feature selection criterion, and 'absolute_error' for the mean\n absolute error. Defaults to 'squared_error'.\n splitter (string, optional):\n The strategy used to choose the split at each node. Supported strategies\n are 'best' to choose the best split and 'random' to choose the best random\n split. Defaults to 'best'.\n max_features (int, float, string or None, optional):\n The number of features to consider when looking for the best split:\n - If int, then consider `max_features` features at each split.\n - If float, then `max_features` is a percentage and `int(max_features *\n n_features)` features are considered at each split.\n - If 'auto', then `max_features=n_features`.\n - If 'sqrt', then `max_features=sqrt(n_features)`.\n - If 'log2', then `max_features=log2(n_features)`.\n - If None, then `max_features=n_features`.\n Note: the search for a split does not stop until at least one valid\n partition of the node samples is found, even if it requires to effectively\n inspect more than `max_features` features. Defaults to None.\n max_depth (int or None, optional):\n The maximum depth of the tree. If None, then nodes are expanded until all\n leaves are pure or until all leaves contain less than min_samples_split\n samples. Defaults to None.\n min_samples_split (int or float, optional):\n The minimum number of samples required to split an internal node:\n - If int, then consider `min_samples_split` as the minimum number.\n - If float, then `min_samples_split` is a percentage and\n `ceil(min_samples_split * n_samples)` are the minimum number of samples\n for each split. Defaults to 2.\n min_samples_leaf (int or float, optional):\n The minimum number of samples required to be at a leaf node:\n - If int, then consider `min_samples_leaf` as the minimum number.\n - If float, then `min_samples_leaf` is a percentage and\n `ceil(min_samples_leaf * n_samples)` are the minimum number of samples\n for each node. Defaults to 1.\n min_weight_fraction_leaf (float, optional):\n The minimum weighted fraction of the sum total of weights (of all the input\n samples) required to be at a leaf node. Samples have equal weight when\n sample_weight is not provided. Defaults to 0.0.\n max_leaf_nodes (int or None, optional):\n Grow a tree with `max_leaf_nodes` in best-first fashion. Best nodes are\n defined as relative reduction in impurity. If None then unlimited number of\n leaf nodes. Defaults to None.\n random_state (int, RandomState instance or None, optional):\n If int, random_state is the seed used by the random number generator; If\n RandomState instance, random_state is the random number generator; If None,\n the random number generator is the RandomState instance used by\n `np.random`. Defaults to None.\n\n Attributes:\n feature_importances_ (array):\n The feature importances, of shape = [n_features]. The higher, the more\n important the feature. The importance of a feature is computed as the\n (normalized) total reduction of the criterion brought by that feature. It\n is also known as the Gini importance.\n max_features_ (int):\n The inferred value of max_features.\n n_features_ (int):\n The number of features when `fit` is performed.\n n_outputs_ (int):\n The number of outputs when `fit` is performed.\n tree_ (Tree object):\n The underlying Tree object.\n y_train_ (array-like):\n Train target values.\n y_train_leaves_ (array-like):\n Cache the leaf nodes that each training sample falls into.\n y_train_leaves_[i] is the leaf that y_train[i] ends up at.\n "

    # All constructor arguments are forwarded unchanged to
    # DecisionTreeRegressor; the quantile behaviour itself comes from the
    # BaseTreeQuantileRegressor mix-in, whose `fit`/`predict` overrides win
    # in the MRO over the plain DecisionTreeRegressor versions.
    def __init__(self, criterion: str='squared_error', splitter: str='best', max_features: Optional[Union[(int, float, str)]]=None, max_depth: Optional[int]=None, min_samples_split: Union[(int, float)]=2, min_samples_leaf: Union[(int, float)]=1, min_weight_fraction_leaf: float=0.0, max_leaf_nodes: Optional[int]=None, random_state: Union[(int, np.random.RandomState, None)]=None):
        super().__init__(criterion=criterion, splitter=splitter, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_features=max_features, max_leaf_nodes=max_leaf_nodes, random_state=random_state)
|
def weighted_percentile(arr: np.ndarray, quantile: float, weights: Optional[np.ndarray]=None, sorter: Optional[np.ndarray]=None):
    """Returns the weighted percentile of an array.

    See [1] for an explanation of this concept.

    Args:
        arr (array-like):
            Samples at which the quantile should be computed, of shape
            [n_samples,].
        quantile (float):
            Quantile, between 0.0 and 1.0.
        weights (array-like, optional):
            The weights, of shape = (n_samples,). Here weights[i] is the weight
            given to point a[i] while computing the quantile. If weights[i] is
            zero, a[i] is simply ignored during the percentile computation. If
            None then uniform weights will be used. Defaults to None.
        sorter (array-like, optional):
            Array of shape [n_samples,], indicating the indices sorting `arr`.
            Thus, if provided, we assume that arr[sorter] is sorted. If None
            then `arr` will be sorted. Defaults to None.

    Returns:
        percentile: float
            Weighted percentile of `arr` at `quantile`.

    Raises:
        ValueError:
            If `quantile` is not between 0.0 and 1.0, or if `arr` and `weights`
            are of different lengths.

    Sources:
        [1]: https://en.wikipedia.org/wiki/Percentile#The_weighted_percentile_method
    """
    if quantile > 1 or quantile < 0:
        raise ValueError('The quantile should be between 0 and 1.')
    arr = np.asarray(arr, dtype=np.float32)
    # BUGFIX: the previous version cast the weights to float32 into a
    # variable it then never used, carrying on with the uncast copy. Keep a
    # single, consistently float32 weights array instead.
    if weights is None:
        weights_arr = np.ones_like(arr)
    else:
        weights_arr = np.asarray(weights, dtype=np.float32)
    if len(arr) != len(weights_arr):
        raise ValueError('`arr` and `weights` should have the same length.')
    if sorter is not None:
        arr = arr[sorter]
        weights_arr = weights_arr[sorter]
    # Zero-weight samples are ignored entirely.
    non_zeros = weights_arr != 0
    arr = arr[non_zeros]
    weights_arr = weights_arr[non_zeros]
    if sorter is None:
        sorted_indices = np.argsort(arr)
        sorted_arr = arr[sorted_indices]
        sorted_weights = weights_arr[sorted_indices]
    else:
        # `arr[sorter]` is already sorted by assumption.
        sorted_arr = arr
        sorted_weights = weights_arr
    # Normalised midpoint cumulative weights; see the weighted percentile
    # method in [1].
    sorted_cum_weights = np.cumsum(sorted_weights)
    total = sorted_cum_weights[-1]
    partial_sum = (1.0 / total) * (sorted_cum_weights - sorted_weights / 2.0)
    start = np.searchsorted(partial_sum, quantile) - 1
    # Clamp to the extremes when the quantile falls outside the midpoints.
    if start == len(sorted_cum_weights) - 1:
        return sorted_arr[-1]
    if start == -1:
        return sorted_arr[0]
    # Linear interpolation between the two bracketing samples.
    fraction = (quantile - partial_sum[start]) / (partial_sum[start + 1] - partial_sum[start])
    return sorted_arr[start] + fraction * (sorted_arr[start + 1] - sorted_arr[start])
|
def fix_dot_env_file():
    'Ensures that the .env file exists and contains all desired variables.'
    env_file_path = Path('.env')
    # Create the file if it does not exist; leave it untouched if it does.
    env_file_path.touch(exist_ok=True)
    env_file_lines = env_file_path.read_text().splitlines(keepends=False)
    # Variable names are everything before the first '=' on each line.
    env_vars = [line.split('=')[0] for line in env_file_lines]
    env_vars_missing = [env_var for env_var in DESIRED_ENVIRONMENT_VARIABLES.keys() if (env_var not in env_vars)]
    # Append (never rewrite) the missing variables to the end of the file.
    with env_file_path.open('a') as f:
        for env_var in env_vars_missing:
            value = ''
            if (env_var == 'GPG_KEY_ID'):
                # Equivalent of the shell pipeline
                #   gpg --list-secret-keys --keyid-format=long | grep sec | sed -E 's/.*\/([^ ]+).*/\1/'
                # built from chained subprocesses: extracts the key id that
                # follows the '/' on the 'sec' line of gpg's key listing.
                gpg = subprocess.Popen(['gpg', '--list-secret-keys', '--keyid-format=long'], stdout=subprocess.PIPE)
                grep = subprocess.Popen(['grep', 'sec'], stdin=gpg.stdout, stdout=subprocess.PIPE)
                value = subprocess.check_output(['sed', '-E', 's/.*\\/([^ ]+).*/\\1/'], stdin=grep.stdout).decode().strip('\n')
                gpg.wait()
                grep.wait()
            if (value == ''):
                # Fall back to prompting the user; the dict value is the
                # human-readable prompt text for that variable.
                value = input(DESIRED_ENVIRONMENT_VARIABLES[env_var])
            f.write(f'''{env_var}="{value}"
''')
|
def bump_major():
    """Add one to the major version."""
    current_major = get_current_version()[0]
    # A major bump resets both minor and patch to zero.
    set_new_version(current_major + 1, 0, 0)
|
def bump_minor():
    """Add one to the minor version."""
    major, minor = get_current_version()[:2]
    # A minor bump resets the patch version to zero.
    set_new_version(major, minor + 1, 0)
|
def bump_patch():
    """Add one to the patch version."""
    major, minor, patch = get_current_version()
    new_patch = patch + 1
    set_new_version(major, minor, new_patch)
|
def set_new_version(major: int, minor: int, patch: int):
    """Sets a new version.

    Args:
        major (int):
            The major version. This only changes when the code stops being
            backwards compatible.
        minor (int):
            The minor version. This changes when a backwards compatible change
            happened.
        patch (int):
            The patch version. This changes when the only new changes are bug
            fixes.

    Raises:
        RuntimeError:
            If CHANGELOG.md contains no [Unreleased] entry.
        subprocess.CalledProcessError:
            If any of the git commands fail.
    """
    version = f'{major}.{minor}.{patch}'
    # Replace the [Unreleased] changelog header(s) with the release
    # version and today's date.
    changelog_path = Path('CHANGELOG.md')
    changelog = changelog_path.read_text()
    if ('[Unreleased]' not in changelog):
        raise RuntimeError('No [Unreleased] entry in CHANGELOG.md.')
    today = dt.date.today().strftime('%Y-%m-%d')
    new_changelog = re.sub('\\[Unreleased\\].*', f'[v{version}] - {today}', changelog)
    changelog_path.write_text(new_changelog)
    # Bump the version in pyproject.toml (first occurrence only, so other
    # `version = "..."` strings in the file are untouched).
    pyproject_path = Path('pyproject.toml')
    pyproject = pyproject_path.read_text()
    pyproject = re.sub('version = "[^"]+"', f'version = "{version}"', pyproject, count=1)
    pyproject_path.write_text(pyproject)
    # Commit, tag and push. BUGFIX: check=True so a failing git command
    # aborts the release instead of being silently ignored.
    subprocess.run(['git', 'add', 'CHANGELOG.md'], check=True)
    subprocess.run(['git', 'add', 'pyproject.toml'], check=True)
    subprocess.run(['git', 'commit', '-m', f'feat: v{version}'], check=True)
    subprocess.run(['git', 'tag', f'v{version}'], check=True)
    subprocess.run(['git', 'push'], check=True)
    subprocess.run(['git', 'push', '--tags'], check=True)
|
def get_current_version() -> Tuple[(int, int, int)]:
    """Fetch the current version of the package.

    Returns:
        triple of ints:
            The current version, separated into major, minor and patch
            versions.

    Raises:
        RuntimeError:
            If the version could not be determined.
    """
    pyproject_text = Path('pyproject.toml').read_text()
    # Grab the value between the quotes of `version = "..."`.
    match = re.search('(?<=version = ")[^"]+(?=")', pyproject_text)
    if match is None:
        raise RuntimeError('No version found in pyproject.toml.')
    major_str, minor_str, patch_str = match.group(0).split('.')
    return (int(major_str), int(minor_str), int(patch_str))
|
def fetch(category: str, all_cats: list, max_results: int=5, start: int=0):
    ' Fetch papers from the ArXiv.\n\n INPUT\n category: str\n The name of the ArXiv category. Leave blank to search among all\n categories\n all_cats: list\n A list of all the categories\n max_results: int = 5\n Maximal number of papers scraped, ArXiv limits this to 10,000\n start: int = 0\n The index of the paper from which the scraping begins\n\n OUTPUT\n A list of dictionaries representing each paper entry, with each\n dictionary having the following keys:\n id: str\n The unique ArXiv identifier\n authors: str\n The authors of the paper, separated by commas\n updated: datetime.datetime\n Last updated date and time\n published: datetime.datetime\n Date and time when the paper was published on ArXiv\n title: str\n Title of the paper\n abstract: str\n Abstract of the paper\n categories: str\n The ArXiv categories that the paper falls under, separated\n by commas\n '
    import requests
    from bs4 import BeautifulSoup
    from datetime import datetime
    from time import sleep
    params = {'search_query': ('cat:' + category), 'start': start, 'max_results': max_results, 'sortBy': 'lastUpdatedDate', 'sortOrder': 'descending'}
    # Retry indefinitely on connection errors; the ArXiv API times out often.
    while True:
        try:
            api_url = 'http://export.arxiv.org/api/query'
            response = requests.get(api_url, params=params)
            # BUGFIX: use the public `content` attribute; `_content` is a
            # private implementation detail of requests.Response.
            soup = BeautifulSoup(response.content, 'lxml')
            break
        except requests.exceptions.ConnectionError:
            sleep(1)
            continue
    papers = []
    for entry in soup.find_all('entry'):
        # Keep only categories we know about; skip papers with none.
        cats = ','.join((cat['term'] for cat in entry.find_all('category') if (cat['term'] in all_cats)))
        if (cats == ''):
            continue
        authors = ','.join((clean(name.string) for author in entry.find_all('author') for name in author.find_all('name')))
        # Timestamps end in 'Z'; strip it so fromisoformat can parse them.
        papers.append({'paper_id': entry.id.string, 'authors': authors, 'updated': datetime.fromisoformat(entry.updated.string[:(- 1)]), 'published': datetime.fromisoformat(entry.published.string[:(- 1)]), 'title': clean(entry.title.string), 'abstract': clean(entry.summary.string), 'categories': cats})
    return papers
|
def scrape(db_name: str='arxiv_data', data_dir: str='.data', batch_size: int=1000, patience: int=20, overwrite: bool=False, start_from: str=None, log_path: str=None):
    " Scrape papers from the ArXiv.\n\n INPUT\n db_name: str = 'arxiv_data'\n Name of the SQLite databse where the data will be stored\n data_dir: str = '.data'\n Directory in which the data files are to be stored\n batch_size: int = 1000\n The amount of papers fetched at each GET request - ArXiv limits\n this to 10,000\n patience: int = 20\n The amount of successive failed GET requests before moving on\n to the next category. The ArXiv API usually times out, resulting\n in a failed GET request, so this number should be reasonably\n large to rule these timeouts out\n overwrite: bool = False\n Whether the database file should be overwritten\n start_from: str = None\n A category to start from, which defaults to starting from scratch\n log_path: str = None\n Path to log file, with file extension. Defaults to no log file\n "
    from time import sleep
    from tqdm.auto import tqdm
    from db import ArXivDatabase
    from datetime import datetime
    from pathlib import Path
    data_dir = get_path(data_dir)
    if not data_dir.is_dir():
        data_dir.mkdir()
    db_path = (data_dir / f'{db_name}.db')
    # BUGFIX: guard the unlink — the previous version raised
    # FileNotFoundError when overwrite was requested but no database file
    # existed yet.
    if overwrite and db_path.exists():
        db_path.unlink()
    if (log_path is not None):
        log_path = Path(log_path)
        log_path.write_text(f'{db_name} database log'.upper())

    def append_log(message: str):
        # Append a timestamped line to the log file, if logging is enabled.
        if log_path is not None:
            log = log_path.read_text()
            log += f'\n{datetime.today()} {message}'
            log_path.write_text(log)

    db = ArXivDatabase(name=db_name, data_dir=data_dir)
    with db.engine.connect() as conn:
        result = conn.execute('select cats.id from cats')
        cats = [cat[0] for cat in result]
    if (start_from is not None):
        try:
            cats = cats[cats.index(start_from):]
        except ValueError:
            # Unknown category name: fall back to scraping everything.
            pass
    for cat in tqdm(cats, desc='Scraping ArXiv categories'):
        append_log(f'Started scraping {cat}')
        with tqdm(leave=False) as pbar:
            pbar.set_description(f'Scraping {cat}')
            (cat_idx, strikes) = (0, 0)
            # Keep fetching batches until `patience` consecutive empty
            # responses (the API frequently times out and returns nothing).
            while (strikes <= patience):
                batch = fetch(category=cat, max_results=batch_size, start=cat_idx, all_cats=cats)
                sleep(5)
                if len(batch):
                    strikes = 0
                else:
                    strikes += 1
                    continue
                db.insert_papers(batch)
                pbar.update(len(batch))
                cat_idx += len(batch)
                append_log(f'Scraped {cat_idx} papers of category {cat}')
        append_log(f'Finished scraping {cat}')
|
class BatchWrapper():
    """Adapt a torchtext data iterator to yield (text, label_matrix) pairs.

    Each batch object produced by the wrapped iterator carries a ``text``
    attribute plus one attribute per category; this wrapper stacks the
    category columns into a single float tensor of shape
    (batch_size, num_categories).
    """

    def __init__(self, data_iter, vectors: str, cats: list):
        self.data_iter = data_iter
        # Mirror the underlying iterator's batch size for convenience.
        self.batch_size = data_iter.batch_size
        self.vectors = vectors
        self.cats = cats

    def __iter__(self):
        for batch in self.data_iter:
            columns = [getattr(batch, cat).unsqueeze(1) for cat in self.cats]
            labels = torch.cat(columns, dim=1).float()
            yield (batch.text, labels)

    def __len__(self):
        return len(self.data_iter)
|
def preprocess_data(tsv_fname: str='arxiv_data', txt_fname: str='preprocessed_docs.txt', data_dir: str='.data', batch_size: int=1000):
    """ Merge titles and abstracts into space-separated token streams.

    The tokenised documents are written to a text file and a copy of the
    category dataframe with a new ``text`` column is stored as a tsv file.
    Tokenisation happens in batches and output is streamed straight to
    disk, so memory usage stays constant.

    INPUT
        tsv_fname: str
            The name of the tsv file containing all the categories,
            without file extension
        txt_fname: str
            The name of the txt file containing the preprocessed texts
        data_dir: str = '.data'
            The data directory
        batch_size: int = 1000
            The amount of rows being preprocessed at a time
    """
    import spacy

    cats_in = get_path(data_dir) / (tsv_fname + '.tsv')
    cats_out = get_path(data_dir) / (tsv_fname + '_pp.tsv')
    txt_path = get_path(data_dir) / txt_fname

    # Only the tokeniser is needed -- no tagging or parsing.
    nlp = spacy.load('en')
    tokenizer = nlp.Defaults.create_tokenizer(nlp)

    raw = pd.read_csv(cats_in, sep='\t', usecols=['title', 'abstract'])
    raw.dropna(inplace=True)
    # Wrap title and abstract in sentinel tokens and join them into one doc.
    docs = (('-TITLE_START- ' + raw['title']) + ' -TITLE_END- -ABSTRACT_START- ' + raw['abstract'] + ' -ABSTRACT_END-')
    del raw

    with tqdm(desc='Preprocessing texts', total=len(docs)) as pbar:
        with open(txt_path, 'w') as out:
            for doc in tokenizer.pipe(docs, batch_size=batch_size):
                tokens = (tok.text for tok in doc)
                out.write(' '.join(tokens) + '\n')
                pbar.update()

    # Re-read the tsv to attach the preprocessed text to the category columns.
    frame = pd.read_csv(cats_in, sep='\t').dropna()
    frame.drop(columns=['title', 'abstract'], inplace=True)
    cats = frame.columns.tolist()
    with open(txt_path, 'r') as f:
        frame['text'] = f.readlines()
    frame = frame[['text'] + cats]
    frame.to_csv(cats_out, sep='\t', index=False)
|
def load_data(tsv_fname: str='arxiv_data_pp', data_dir: str='.data', batch_size: int=32, split_ratio: float=0.95, random_seed: int=42, vectors: str='fasttext') -> tuple:
    """ Load the preprocessed data and wrap it into batched iterators.

    Tokenises the tsv data, builds a vocabulary, splits into a training-
    and validation set, numericalises the texts and batches the data into
    padded batches of similar text lengths.

    INPUT
        tsv_fname: str = 'arxiv_data_pp'
            The name of the tsv file, without file extension
        data_dir: str = '.data'
            The data directory
        batch_size: int = 32
            The size of each batch
        split_ratio: float = 0.95
            The proportion of the dataset reserved for training
        random_seed: int = 42
            A random seed to ensure that the same training/validation split
            is achieved every time. If set to None then no seed is used.
        vectors: {'fasttext', 'glove'} = 'fasttext'
            The type of word vectors to use

    OUTPUT
        A triple (train_dl, val_dl, vocab) with two BatchWrapper iterators
        and the vocabulary built from the training set.
    """
    from torchtext import data, vocab
    from utils import get_cats
    import random

    TXT = data.Field()
    CAT = data.Field(sequential=False, use_vocab=False, is_target=True)
    cats = get_cats(data_dir=data_dir)['id']
    # One field for the text column followed by one label field per category.
    fields = [('text', TXT)] + [(cat, CAT) for cat in cats]

    dataset = data.TabularDataset(path=(get_path(data_dir) / f'{tsv_fname}.tsv'), format='tsv', fields=fields, skip_header=True)

    if random_seed is None:
        train, val = dataset.split(split_ratio=split_ratio)
    else:
        # Seed the RNG so that the split is reproducible across runs.
        random.seed(random_seed)
        train, val = dataset.split(split_ratio=split_ratio, random_state=random.getstate())

    vecs = vocab.Vectors(name=vectors, cache=get_path(data_dir), url=('https://filedn.com/lRBwPhPxgV74tO0rDoe8SpH/scholarly_data/' + vectors))
    TXT.build_vocab(train, vectors=vecs)

    # Bucket by text length so padding inside a batch is minimal.
    train_iter, val_iter = data.BucketIterator.splits(datasets=(train, val), batch_size=batch_size, sort_key=lambda sample: len(sample.text))
    train_dl = BatchWrapper(train_iter, vectors=vectors, cats=cats)
    val_dl = BatchWrapper(val_iter, vectors=vectors, cats=cats)
    del dataset, train, val, train_iter, val_iter
    return (train_dl, val_dl, TXT.vocab)
|
class ArXivDatabase():
    """ A SQLite database for storing ArXiv papers.

    INPUT
        name: str = 'arxiv_data.db'
            Name of the database
        data_dir: str = '.data'
            Folder which contains the database
    """

    def __init__(self, name: str='arxiv_data.db', data_dir: str='.data'):
        from sqlalchemy import create_engine
        db_path = (get_path(data_dir) / name)
        self.engine = create_engine(f'sqlite:///{db_path}')
        self.data_dir = data_dir
        # Ensure the schema exists and the category tables are filled
        # before any papers are inserted.
        self.create_tables()
        self.populate_cats()

    def create_tables(self):
        ''' Create the tables of the database. Returns self. '''
        from sqlalchemy import MetaData, Table, Column
        from sqlalchemy import String, DateTime, ForeignKey
        metadata = MetaData()
        # Master categories (e.g. "math") and their human-readable names.
        Table('master_cats', metadata, Column('id', String, primary_key=True), Column('name', String))
        # Categories (e.g. "math.LO"), each belonging to one master category.
        Table('cats', metadata, Column('id', String, primary_key=True), Column('name', String), Column('master_cat', String, ForeignKey('master_cats.id')))
        Table('papers', metadata, Column('id', String, primary_key=True), Column('updated', DateTime), Column('published', DateTime), Column('title', String), Column('abstract', String))
        # Many-to-many link tables between papers and categories/authors.
        Table('papers_cats', metadata, Column('paper_id', String, ForeignKey('papers.id'), primary_key=True), Column('category_id', String, ForeignKey('cats.id'), primary_key=True))
        Table('authors', metadata, Column('id', String, primary_key=True))
        Table('papers_authors', metadata, Column('paper_id', String, ForeignKey('papers.id'), primary_key=True), Column('author_id', String, ForeignKey('authors.id'), primary_key=True))
        metadata.create_all(self.engine)
        return self

    def populate_cats(self):
        ''' Fetch list of all ArXiv categories from arxitics.com and
        use it to populate the cats and master_cats tables in the
        database. Returns self. '''
        import requests
        from bs4 import BeautifulSoup
        from tqdm.auto import tqdm
        master_cats = {'physics': 'Physics', 'math': 'Mathematics', 'cs': 'Computer Science', 'q-bio': 'Quantitative Biology', 'q-fin': 'Quantitative Finance', 'stats': 'Statistics'}
        master_cat_query = 'insert or ignore into master_cats values '
        master_cat_query += ','.join((f'("{id}", "{name}")' for (id, name) in master_cats.items()))
        (ids, names, mcats) = ([], [], [])
        base_url = 'http://arxitics.com/help/categories'
        for master_cat in tqdm(master_cats, desc='Setting up categories'):
            response = requests.get(base_url, {'group': master_cat})
            # Fixed: use the public `content` attribute instead of the
            # private `_content` internal of the requests library.
            soup = BeautifulSoup(response.content, 'lxml')
            # Each category is rendered as <li><strong>id</strong>
            # <span>- name</span> ...</li> on the help page.
            for li in soup.find_all('li'):
                if (li.strong is not None):
                    ids.append(li.strong.text)
                    names.append(li.span.text[2:])
                    mcats.append(master_cat)
        cat_query = 'insert or ignore into cats values '
        cat_query += ','.join((f'("{id}", "{name}", "{mcat}")' for (id, name, mcat) in zip(ids, names, mcats)))
        with self.engine.connect() as conn:
            conn.execute(master_cat_query)
            conn.execute(cat_query)
        return self

    def insert_papers(self, papers: list):
        ''' Insert papers into the database. Returns self.

        INPUT
            papers: list
                A list of dictionaries, each containing the following keys:
                    paper_id: str
                        The unique ArXiv identifier
                    authors: str
                        The authors of the paper, separated by commas
                    updated: datetime.datetime
                        Last updated date and time
                    published: datetime.datetime
                        Date and time when the paper was published on ArXiv
                    title: str
                        Title of the paper
                    abstract: str
                        Abstract of the paper
                    categories: str
                        The ArXiv categories that the paper falls under,
                        separated by commas
        '''
        # NOTE(review): these queries are built by string interpolation,
        # which breaks on values containing double quotes and is unsafe
        # for untrusted input; consider parameterised queries instead.
        p_entry = (lambda paper: f'''
            (
                "{paper['paper_id']}",
                "{paper['updated']}",
                "{paper['published']}",
                "{paper['title']}",
                "{paper['abstract']}"
            )
        ''')
        a_entry = (lambda author: f'''
            (
                "{author.strip()}"
            )
        ''')
        pc_entry = (lambda paper, cat: f'''
            (
                "{paper['paper_id']}",
                "{cat.strip()}"
            )
        ''')
        pa_entry = (lambda paper, author: f'''
            (
                "{paper['paper_id']}",
                "{author.strip()}"
            )
        ''')
        p_query = 'insert or ignore into papers values '
        p_query += ','.join((p_entry(paper) for paper in papers))
        a_query = 'insert or ignore into authors values '
        a_query += ','.join((a_entry(author) for paper in papers for author in paper['authors'].split(',')))
        pc_query = 'insert or ignore into papers_cats values '
        pc_query += ','.join((pc_entry(paper, cat) for paper in papers for cat in paper['categories'].split(',')))
        pa_query = 'insert or ignore into papers_authors values '
        pa_query += ','.join((pa_entry(paper, author) for paper in papers for author in paper['authors'].split(',')))
        with self.engine.connect() as conn:
            conn.execute(p_query)
            conn.execute(a_query)
            conn.execute(pc_query)
            conn.execute(pa_query)
        return self

    def get_cats(self, conn=None) -> dict:
        ''' Get all categories as {'id': [...], 'name': [...]} and cache
        the result as cats.json in the data directory. '''
        import json
        query = 'select id, name from cats order by id'
        # Reuse an existing connection when given; otherwise open our own.
        if (conn is None):
            with self.engine.connect() as own_conn:
                cat_result = list(own_conn.execute(query))
        else:
            cat_result = list(conn.execute(query))
        cats = {'id': [cat[0] for cat in cat_result], 'name': [cat[1] for cat in cat_result]}
        with open((get_path(self.data_dir) / 'cats.json'), 'w') as f:
            json.dump(cats, f)
        return cats

    def get_mcat_dict(self, conn=None) -> dict:
        ''' Get a dictionary mapping each category to its master category,
        cached as mcat_dict.json in the data directory. '''
        import json
        query = 'select id, master_cat from cats'
        # Reuse an existing connection when given; otherwise open our own.
        if (conn is None):
            with self.engine.connect() as own_conn:
                mcat_result = list(own_conn.execute(query))
        else:
            mcat_result = list(conn.execute(query))
        mcat_dict = {pair[0]: pair[1] for pair in mcat_result}
        with open((get_path(self.data_dir) / 'mcat_dict.json'), 'w') as f:
            json.dump(mcat_dict, f)
        return mcat_dict

    def get_training_df(self):
        ''' Get a dataframe with titles, abstracts and categories
        of all the papers in the database. The dataframe is also written
        to arxiv_data.tsv in the data directory.

        OUTPUT
            A Pandas DataFrame object with columns title, abstract and
            one binary column per ArXiv category
        '''
        import pandas as pd
        from tqdm.auto import tqdm
        with self.engine.connect() as conn:
            cats = self.get_cats(conn)
            df = pd.read_sql_table(table_name='papers', con=conn, columns=['id', 'title', 'abstract'])
            # Fixed: iterate over the category ids; iterating the dict
            # itself only yields the keys 'id' and 'name', which produced
            # two bogus columns instead of one column per category.
            for cat in tqdm(cats['id'], desc='Creating dataframe'):
                query = f'''select paper_id, category_id from papers_cats
                            where category_id = "{cat}"'''
                paper_ids = [paper[0] for paper in conn.execute(query)]
                bool_col = df['id'].isin(paper_ids).astype(int)
                df[cat] = bool_col
        df = df.drop(columns=['id'])
        df.to_csv((get_path(self.data_dir) / 'arxiv_data.tsv'), sep='\t', index=False)
        return df
|
def end2end(mcat_ratio: float, epochs: int, dim: int, nlayers: int, fname: str, gpu: bool, name: str, lr: float, batch_size: int, split_ratio: float, vectors: str, data_dir: str, pbar_width: int, wandb: bool, boom_dim: int, dropout: float, ema: float, overwrite_model: bool) -> str:
    ''' Load the data, preprocess it if needed, build the LogReg model,
    train it and evaluate it.

    Returns whatever ``model.evaluate(val_dl)`` returns, i.e. the
    classification report for the validation set.
    '''
    from data import load_data
    # Fixed: removed the unused SHARNN import (only LogReg is built here,
    # and the docstring previously claimed SHARNN was used).
    from modules import LogReg

    pp_path = (get_path(data_dir) / f'{fname}_pp.tsv')
    if (not pp_path.is_file()):
        # The preprocessed tsv is missing, so (re)build it, creating the
        # raw dataframe and category metadata first if those are missing too.
        from data import preprocess_data
        raw_path = (get_path(data_dir) / f'{fname}.tsv')
        cats_path = (get_path(data_dir) / 'cats.json')
        mcat_dict_path = (get_path(data_dir) / 'mcat_dict.json')
        if (not (raw_path.is_file() and cats_path.is_file() and mcat_dict_path.is_file())):
            from db import ArXivDatabase
            db = ArXivDatabase(data_dir=data_dir)
            db.get_mcat_dict()
            db.get_cats()
            if (not raw_path.is_file()):
                db.get_training_df()
        preprocess_data(data_dir=data_dir)

    (train_dl, val_dl, vocab) = load_data(tsv_fname=f'{fname}_pp', batch_size=batch_size, split_ratio=split_ratio, vectors=vectors, data_dir=data_dir)
    model = LogReg(dim=dim, nlayers=nlayers, data_dir=data_dir, pbar_width=pbar_width, vocab=vocab, boom_dim=boom_dim, dropout=dropout)
    if gpu:
        model.cuda()
    model = model.fit(train_dl, val_dl, epochs=epochs, lr=lr, mcat_ratio=mcat_ratio, name=name, use_wandb=wandb, ema=ema, overwrite_model=overwrite_model)
    return model.evaluate(val_dl)
|
def predict(model, title: str, abstract: str):
    """ Get the predicted categories from a model, a title and an abstract.

    INPUT
        model: torch.nn.Module
            A trained model
        title: str
            The title of a research paper
        abstract: str
            The abstract of a research paper

    OUTPUT
        A list of pairs (category_name, probability), with category_name
        being the official name of the arXiv category, such as math.LO,
        and the probability being the outputted sigmoid value from the
        model. The list will only contain pairs where the probability is
        above 50%, except that the single most probable category is always
        included (see the threshold below).
    """
    import spacy
    # Wrap the cleaned title and abstract in the same sentinel tokens used
    # during corpus preprocessing (see preprocess_data).
    text = f'-TITLE_START- {clean(title)} -TITLE_END- -ABSTRACT_START- {clean(abstract)} -ABSTRACT_END-'
    nlp = spacy.load('en')
    tokenizer = nlp.Defaults.create_tokenizer(nlp)
    # Numericalise the tokens via the model's own string-to-index mapping.
    # NOTE(review): tokens missing from model.stoi would raise a KeyError --
    # confirm the vocabulary covers all expected inputs.
    idxs = torch.LongTensor([[model.stoi[t.text] for t in tokenizer(text)]])
    # Transposed to (sequence_length, 1) -- presumably the model expects
    # (seq_len, batch) input; confirm against the model's forward pass.
    logits = model(idxs.transpose(0, 1))
    probs = torch.sigmoid(logits)
    cats = get_cats(data_dir=model.data_dir)['id']
    sorted_idxs = probs.argsort(descending=True)
    # min(0.5, max(probs)) guarantees that at least the top category passes
    # the threshold even when no sigmoid value exceeds 0.5.
    # NOTE(review): the indexing assumes `probs` is a 1-D tensor of
    # per-category values -- confirm the model's output shape.
    predicted_cats = [(cats[idx], round(float(probs[idx]), 2)) for idx in sorted_idxs if (probs[idx] >= min(0.5, torch.max(probs)))]
    return predicted_cats
|
def evaluate(model, val_dl, output_dict: bool=False):
    """ Evaluate a model on a validation dataset.

    INPUT
        model: torch.nn.Module
            A trained model
        val_dl: torch.utils.data.DataLoader
            A data loader containing a validation dataset
        output_dict: bool = False
            Whether to output a dictionary instead of a string

    OUTPUT
        A string or dictionary containing a classification report
    """
    from sklearn.metrics import classification_report
    import warnings

    with torch.no_grad():
        model.eval()
        targets, predictions = [], []
        for (x_val, y_val) in val_dl:
            if model.is_cuda():
                x_val = x_val.cuda()
                y_val = y_val.cuda()
            # Binarise the sigmoid outputs at the 0.5 threshold.
            preds = torch.sigmoid(model(x_val)) > 0.5
            targets.append(y_val.int())
            predictions.append(preds.int())

        y_val = torch.cat(targets, dim=0)
        y_hat = torch.cat(predictions, dim=0)
        cats = get_cats(data_dir=model.data_dir)['id']
        # Suppress sklearn warnings about categories with no samples.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            report = classification_report(y_true=y_val.cpu(), y_pred=y_hat.cpu(), target_names=cats, output_dict=output_dict)
    return report
|
def train_fasttext(txt_fname: str='preprocessed_docs.txt', model_fname: str='fasttext.bin', vec_fname: str='fasttext', data_dir: str='.data', lr: float=0.05, emb_dim: int=100, window: int=5, epochs: int=5, min_count: int=5, min_char_ngram: int=3, max_char_ngram: int=6, neg_samples: int=5, max_word_ngram: int=1):
    """ Train FastText vectors on a corpus. All default values are the
    official defaults.

    INPUT
        txt_fname: str
            The name of the txt file containing the corpus, including its
            file extension
        model_fname: str = 'fasttext.bin'
            The name of the output FastText model file
        vec_fname: str = 'fasttext'
            The name of the output txt file containing the word vectors
        data_dir: str = '.data'
            The directory containing all data files
        lr: float = 0.05
            The learning rate
        emb_dim: int = 100
            The dimension of the word embeddings
        window: int = 5
            The size of the context window considered at every word
        epochs: int = 5
            The number of training epochs
        min_count: int = 5
            The minimal number of times a word has to occur to be assigned
            a word vector
        min_char_ngram: int = 3
            The minimum number of characters in the character n-grams
        max_char_ngram: int = 6
            The maximum number of characters in the character n-grams
        neg_samples: int = 5
            How many negative samples to include for every positive sample
        max_word_ngram: int = 1
            The maximum number of words in the word n-grams
    """
    import fasttext
    from tqdm.auto import tqdm

    txt_path = get_path(data_dir) / txt_fname
    model_path = get_path(data_dir) / model_fname

    ft = fasttext.train_unsupervised(str(txt_path), lr=lr, dim=emb_dim, ws=window, epoch=epochs, minCount=min_count, minn=min_char_ngram, maxn=max_char_ngram, neg=neg_samples, wordNgrams=max_word_ngram)
    ft.save_model(str(model_path))

    # Dump the vectors in the plain "word x1 x2 ... xn" text format.
    with open(get_path(data_dir) / vec_fname, 'w') as vec_file:
        for word in tqdm(ft.words, desc='Saving word vectors'):
            components = ' '.join(str(x) for x in ft.get_word_vector(word))
            vec_file.write(word + ' ' + components + '\n')
    del ft
|
class NestedBCELoss(nn.Module):
    """ A nested form of binary cross entropy.

    From the category predictions it derives master category predictions,
    using the utils.cats2mcats function, which enables a positive master
    category prediction even when every individual category prediction
    within that master category has a sigmoid value below 0.50.

    It then computes the binary cross entropy of the category- and master
    category predictions, with the given class weights, and mixes the two
    losses according to mcat_ratio.

    INPUT
        cat_weights: torch.FloatTensor
            The class weights for the categories
        mcat_weights: torch.FloatTensor
            The class weights for the master categories
        mcat_ratio: float = 0.1
            The ratio between the category loss and the master category loss
        data_dir: str = '.data'
            The path to the data directory
    """

    def __init__(self, cat_weights, mcat_weights, mcat_ratio: float=0.1, data_dir: str='.data'):
        super().__init__()
        self.masks = get_mcat_masks(data_dir=data_dir)
        self.cat_weights = cat_weights
        self.mcat_weights = mcat_weights
        self.mcat_ratio = mcat_ratio
        self.data_dir = data_dir

    def forward(self, pred, target, weighted: bool=True):
        # Derive master-category predictions/targets from the fine-grained ones.
        (mpred, mtarget) = cats2mcats(pred, target, masks=self.masks, data_dir=self.data_dir)
        cat_pos_weight = self.cat_weights if weighted else None
        mcat_pos_weight = self.mcat_weights if weighted else None
        cat_loss = F.binary_cross_entropy_with_logits(pred, target, pos_weight=cat_pos_weight)
        mcat_loss = F.binary_cross_entropy_with_logits(mpred, mtarget, pos_weight=mcat_pos_weight)
        # Convex combination of the two losses, weighted by mcat_ratio.
        return ((1 - self.mcat_ratio) * cat_loss) + (self.mcat_ratio * mcat_loss)

    def cuda(self):
        # Move the cached masks and weights along with the module.
        self.masks = self.masks.cuda()
        self.cat_weights = self.cat_weights.cuda()
        self.mcat_weights = self.mcat_weights.cuda()
        return self
|
def train_model(model, train_dl, val_dl, epochs: int=10, lr: float=0.0003, name: str='no_name', mcat_ratio: float=0.1, ema: float=0.99, pbar_width: int=None, use_wandb: bool=True, overwrite_model: bool=True):
    """ Train a given model.

    INPUT
        model: torch.nn.Module
            The model we would like to train
        train_dl: torch.utils.data.DataLoader
            A dataloader containing the training set
        val_dl: torch.utils.data.DataLoader
            A dataloader containing the validation set
        epochs: int = 10
            The amount of epochs to train
        lr: float = 3e-4
            The learning rate used
        name: str = 'no_name'
            The name of the training run, used for wandb purposes
        mcat_ratio: float = 0.1
            How much the master category loss is prioritised over the
            category loss
        ema: float = 0.99
            The factor used in computing the exponential moving averages of
            the loss and sample-average F1 scores. Roughly corresponds to
            taking the average of the previous 1 / (1 - ema) many batches
        pbar_width: int = None
            The width of the progress bar. If running in a Jupyter notebook
            then this should be set to ~1000
        use_wandb: bool = True
            Whether to use the Weights & Biases online performance recording
        overwrite_model: bool = True
            Whether to overwrite existing models when saving

    OUTPUT
        The trained model
    """
    from sklearn.metrics import f1_score
    import warnings
    from pathlib import Path
    print(f'Training on {(len(train_dl) * train_dl.batch_size):,d} samples and validating on {(len(val_dl) * val_dl.batch_size):,d} samples.')
    print(f'Number of trainable parameters: {model.trainable_params():,d}')
    if use_wandb:
        # Record hyperparameters and gradients on Weights & Biases.
        import wandb
        config = {'name': name, 'mcat_ratio': mcat_ratio, 'epochs': epochs, 'lr': lr, 'batch_size': train_dl.batch_size, 'ema': ema, 'vectors': train_dl.vectors, 'dropout': model.params['dropout'], 'nlayers': model.params['nlayers'], 'dim': model.params['dim'], 'boom_dim': model.params['boom_dim'], 'emb_dim': model.params['vocab'].vectors.shape[1]}
        wandb.init(project='scholarly', config=config)
        wandb.watch(model)
    # Class weights counteract the label imbalance in the training set.
    weights = get_class_weights(train_dl, pbar_width=model.pbar_width, data_dir=model.data_dir)
    criterion = NestedBCELoss(**weights, mcat_ratio=mcat_ratio, data_dir=model.data_dir)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    mcat_masks = get_mcat_masks(data_dir=model.data_dir)
    if model.is_cuda():
        mcat_masks = mcat_masks.cuda()
        criterion = criterion.cuda()
    # Running (bias-corrected) exponential moving averages of the metrics,
    # plus the best validation category F1 seen so far.
    (avg_loss, avg_cat_f1, avg_mcat_f1, best_score) = (0, 0, 0, 0)
    for epoch in range(epochs):
        with tqdm(total=(len(train_dl) * train_dl.batch_size), ncols=model.pbar_width) as pbar:
            model.train()
            for (idx, (x_train, y_train)) in enumerate(train_dl):
                optimizer.zero_grad()
                if model.is_cuda():
                    x_train = x_train.cuda()
                    y_train = y_train.cuda()
                y_hat = model(x_train)
                preds = torch.sigmoid(y_hat)
                # Master-category predictions derived from the fine ones.
                (my_hat, my_train) = cats2mcats(y_hat, y_train, masks=mcat_masks, data_dir=model.data_dir)
                mpreds = torch.sigmoid(my_hat)
                loss = criterion(y_hat, y_train)
                loss.backward()
                optimizer.step()
                # sklearn warns on labels absent from a batch; ignore that.
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore')
                    cat_f1 = f1_score((preds.cpu() > 0.5), y_train.cpu(), average='samples')
                    mcat_f1 = f1_score((mpreds.cpu() > 0.5), my_train.cpu(), average='samples')
                # Number of samples seen so far, used for bias correction.
                iteration = ((epoch * len(train_dl)) * train_dl.batch_size)
                iteration += (idx * train_dl.batch_size)
                # EMA update followed by bias correction (the divisor keeps
                # the averages unbiased during the first few iterations).
                avg_loss = ((ema * avg_loss) + ((1 - ema) * float(loss)))
                avg_loss /= (1 - (ema ** ((iteration / (1 - ema)) + 1)))
                avg_cat_f1 = ((ema * avg_cat_f1) + ((1 - ema) * float(cat_f1)))
                avg_cat_f1 /= (1 - (ema ** ((iteration / (1 - ema)) + 1)))
                avg_mcat_f1 = ((ema * avg_mcat_f1) + ((1 - ema) * float(mcat_f1)))
                avg_mcat_f1 /= (1 - (ema ** ((iteration / (1 - ema)) + 1)))
                if use_wandb:
                    wandb.log({'loss': avg_loss, 'cat f1': avg_cat_f1, 'mcat f1': avg_mcat_f1})
                desc = f'Epoch {epoch:2d} - loss {avg_loss:.4f} - cat f1 {avg_cat_f1:.4f} - mcat f1 {avg_mcat_f1:.4f}'
                pbar.set_description(desc)
                pbar.update(train_dl.batch_size)
            # Validation pass at the end of every epoch.
            with torch.no_grad():
                model.eval()
                (val_loss, val_cat_f1, val_mcat_f1) = (0, 0, 0)
                (y_vals, y_hats) = ([], [])
                for (x_val, y_val) in val_dl:
                    if model.is_cuda():
                        x_val = x_val.cuda()
                        y_val = y_val.cuda()
                    y_hat = model(x_val)
                    preds = torch.sigmoid(y_hat)
                    (my_hat, my_val) = cats2mcats(y_hat, y_val, masks=mcat_masks, data_dir=model.data_dir)
                    mpreds = torch.sigmoid(my_hat)
                    y_vals.append(y_val)
                    y_hats.append((preds > 0.5))
                    # Unweighted loss so scores are comparable across runs.
                    val_loss += float(criterion(y_hat, y_val, weighted=False))
                    with warnings.catch_warnings():
                        warnings.simplefilter('ignore')
                        val_cat_f1 += f1_score((preds.cpu() > 0.5), y_val.cpu(), average='samples')
                        val_mcat_f1 += f1_score((mpreds.cpu() > 0.5), my_val.cpu(), average='samples')
                y_val = torch.cat(y_vals, dim=0)
                y_hat = torch.cat(y_hats, dim=0)
                # Averages over the validation batches.
                val_loss /= len(val_dl)
                val_cat_f1 /= len(val_dl)
                val_mcat_f1 /= len(val_dl)
                if use_wandb:
                    wandb.log({'val loss': val_loss, 'val cat f1': val_cat_f1, 'val mcat f1': val_mcat_f1})
                # Checkpoint whenever the validation category F1 improves.
                if (val_cat_f1 > best_score):
                    model_fname = f'model_{(val_cat_f1 * 100):.2f}.pt'
                    best_score = val_cat_f1
                    data = {'params': model.params, 'state_dict': model.state_dict(), 'scores': model.evaluate(val_dl, output_dict=True)}
                    if overwrite_model:
                        # Remove previous checkpoints before saving a new one.
                        for f in get_path(model.data_dir).glob(f'model*.pt'):
                            f.unlink()
                    with warnings.catch_warnings():
                        warnings.simplefilter('ignore')
                        path = (get_path(model.data_dir) / model_fname)
                        torch.save(data, path)
                        if use_wandb:
                            # Mirror the checkpoint into the wandb run dir.
                            if overwrite_model:
                                for f in Path(wandb.run.dir).glob(f'model*.pt'):
                                    f.unlink()
                            torch.save(data, (Path(wandb.run.dir) / model_fname))
                            wandb.save(model_fname)
                desc = f'Epoch {epoch:2d} - loss {avg_loss:.4f} - cat f1 {avg_cat_f1:.4f} - mcat f1 {avg_mcat_f1:.4f} - val_loss {val_loss:.4f} - val cat f1 {val_cat_f1:.4f} - val mcat f1 {val_mcat_f1:.4f}'
                pbar.set_description(desc)
    return model
|
@app.route('/')
def index():
    """ Redirect the bare domain root to the scholarly frontend. """
    return redirect('/scholarly')
|
@app.route('/scholarly', methods=['POST', 'GET'])
def result():
    """ Serve the scholarly form and answer prediction requests.

    A request without parameters renders the empty form. Otherwise the
    title and abstract are fed to the stored model: POST requests render
    the result page, parameterised GET requests answer with JSON.
    """
    import json
    from scholarly.modules import load_model

    is_post = (request.method == 'POST')
    data_dict = request.form if is_post else request.args
    if not data_dict:
        return render_template('scholarly.html')

    # Load the most recently saved model checkpoint from the data folder.
    model_path = next(Path('.data').glob('scholarly_model*.pt'))
    model, _ = load_model(model_path)
    preds = model.predict(data_dict['title'], data_dict['abstract'])

    if is_post:
        return render_template('scholarly.html', preds=preds, **data_dict)
    return json.dumps(preds)
|
def upgrade_config(cfg: CN, to_version: Optional[int]=None) -> CN:
    """
    Upgrade a config from its current version to a newer version.

    Args:
        cfg (CfgNode):
        to_version (int): defaults to the latest version.
    """
    cfg = cfg.clone()
    if to_version is None:
        to_version = _C.VERSION
    assert cfg.VERSION <= to_version, 'Cannot upgrade from v{} to v{}!'.format(cfg.VERSION, to_version)
    # Apply the converters one version step at a time, e.g. ConverterV2
    # upgrades a v1 config to v2.
    while cfg.VERSION < to_version:
        next_version = cfg.VERSION + 1
        converter = globals()['ConverterV' + str(next_version)]
        converter.upgrade(cfg)
        cfg.VERSION = next_version
    return cfg
|
def downgrade_config(cfg: CN, to_version: int) -> CN:
    """
    Downgrade a config from its current version to an older version.

    Args:
        cfg (CfgNode):
        to_version (int):

    Note:
        A general downgrade of arbitrary configs is not always possible due to the
        different functionalities in different versions.
        The purpose of downgrade is only to recover the defaults in old versions,
        allowing it to load an old partial yaml config.
        Therefore, the implementation only needs to fill in the default values
        in the old version when a general downgrade is not possible.
    """
    cfg = cfg.clone()
    assert cfg.VERSION >= to_version, 'Cannot downgrade from v{} to v{}!'.format(cfg.VERSION, to_version)
    # Walk the converters backwards, e.g. ConverterV2 downgrades v2 to v1.
    while cfg.VERSION > to_version:
        converter = globals()['ConverterV' + str(cfg.VERSION)]
        converter.downgrade(cfg)
        cfg.VERSION = cfg.VERSION - 1
    return cfg
|
def guess_version(cfg: CN, filename: str) -> int:
    """
    Guess the version of a partial config where the VERSION field is not specified.
    Returns the version, or the latest if cannot make a guess.

    This makes it easier for users to migrate.
    """
    logger = logging.getLogger(__name__)

    def _has(name: str) -> bool:
        # Walk the dotted path; fail as soon as a segment is missing.
        node = cfg
        for part in name.split('.'):
            if part not in node:
                return False
            node = node[part]
        return True

    ret = None
    # Keys that only existed in v1 configs betray the version.
    if _has('MODEL.WEIGHT') or _has('TEST.AUG_ON'):
        ret = 1
    if ret is not None:
        logger.warning("Config '{}' has no VERSION. Assuming it to be v{}.".format(filename, ret))
    else:
        ret = _C.VERSION
        logger.warning("Config '{}' has no VERSION. Assuming it to be compatible with latest v{}.".format(filename, ret))
    return ret
|
def _rename(cfg: CN, old: str, new: str) -> None:
old_keys = old.split('.')
new_keys = new.split('.')
def _set(key_seq: List[str], val: str) -> None:
cur = cfg
for k in key_seq[:(- 1)]:
if (k not in cur):
cur[k] = CN()
cur = cur[k]
cur[key_seq[(- 1)]] = val
def _get(key_seq: List[str]) -> CN:
cur = cfg
for k in key_seq:
cur = cur[k]
return cur
def _del(key_seq: List[str]) -> None:
cur = cfg
for k in key_seq[:(- 1)]:
cur = cur[k]
del cur[key_seq[(- 1)]]
if ((len(cur) == 0) and (len(key_seq) > 1)):
_del(key_seq[:(- 1)])
_set(new_keys, _get(old_keys))
_del(old_keys)
|
class _RenameConverter():
    """
    A converter that handles simple rename.
    """

    # Pairs of (old dotted key, new dotted key).
    RENAME: List[Tuple[str, str]] = []

    @classmethod
    def upgrade(cls, cfg: CN) -> None:
        for old, new in cls.RENAME:
            _rename(cfg, old, new)

    @classmethod
    def downgrade(cls, cfg: CN) -> None:
        # Undo the renames in reverse order.
        for old, new in reversed(cls.RENAME):
            _rename(cfg, new, old)
|
class ConverterV1(_RenameConverter):
    # v0 -> v1: the RPN head name moved under the MODEL.RPN section.
    RENAME = [('MODEL.RPN_HEAD.NAME', 'MODEL.RPN.HEAD_NAME')]
|
class ConverterV2(_RenameConverter):
    """
    A large bulk of rename, before public release.
    """

    RENAME = [('MODEL.WEIGHT', 'MODEL.WEIGHTS'), ('MODEL.PANOPTIC_FPN.SEMANTIC_LOSS_SCALE', 'MODEL.SEM_SEG_HEAD.LOSS_WEIGHT'), ('MODEL.PANOPTIC_FPN.RPN_LOSS_SCALE', 'MODEL.RPN.LOSS_WEIGHT'), ('MODEL.PANOPTIC_FPN.INSTANCE_LOSS_SCALE', 'MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT'), ('MODEL.PANOPTIC_FPN.COMBINE_ON', 'MODEL.PANOPTIC_FPN.COMBINE.ENABLED'), ('MODEL.PANOPTIC_FPN.COMBINE_OVERLAP_THRESHOLD', 'MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH'), ('MODEL.PANOPTIC_FPN.COMBINE_STUFF_AREA_LIMIT', 'MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT'), ('MODEL.PANOPTIC_FPN.COMBINE_INSTANCES_CONFIDENCE_THRESHOLD', 'MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH'), ('MODEL.ROI_HEADS.SCORE_THRESH', 'MODEL.ROI_HEADS.SCORE_THRESH_TEST'), ('MODEL.ROI_HEADS.NMS', 'MODEL.ROI_HEADS.NMS_THRESH_TEST'), ('MODEL.RETINANET.INFERENCE_SCORE_THRESHOLD', 'MODEL.RETINANET.SCORE_THRESH_TEST'), ('MODEL.RETINANET.INFERENCE_TOPK_CANDIDATES', 'MODEL.RETINANET.TOPK_CANDIDATES_TEST'), ('MODEL.RETINANET.INFERENCE_NMS_THRESHOLD', 'MODEL.RETINANET.NMS_THRESH_TEST'), ('TEST.DETECTIONS_PER_IMG', 'TEST.DETECTIONS_PER_IMAGE'), ('TEST.AUG_ON', 'TEST.AUG.ENABLED'), ('TEST.AUG_MIN_SIZES', 'TEST.AUG.MIN_SIZES'), ('TEST.AUG_MAX_SIZE', 'TEST.AUG.MAX_SIZE'), ('TEST.AUG_FLIP', 'TEST.AUG.FLIP')]

    @classmethod
    def upgrade(cls, cfg: CN) -> None:
        super().upgrade(cfg)
        # Anchor settings moved from the architecture-specific section
        # (RPN or RETINANET, depending on META_ARCHITECTURE) into the
        # shared ANCHOR_GENERATOR section; the other section's now-unused
        # anchor keys are dropped.
        if (cfg.MODEL.META_ARCHITECTURE == 'RetinaNet'):
            _rename(cfg, 'MODEL.RETINANET.ANCHOR_ASPECT_RATIOS', 'MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS')
            _rename(cfg, 'MODEL.RETINANET.ANCHOR_SIZES', 'MODEL.ANCHOR_GENERATOR.SIZES')
            del cfg['MODEL']['RPN']['ANCHOR_SIZES']
            del cfg['MODEL']['RPN']['ANCHOR_ASPECT_RATIOS']
        else:
            _rename(cfg, 'MODEL.RPN.ANCHOR_ASPECT_RATIOS', 'MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS')
            _rename(cfg, 'MODEL.RPN.ANCHOR_SIZES', 'MODEL.ANCHOR_GENERATOR.SIZES')
            del cfg['MODEL']['RETINANET']['ANCHOR_SIZES']
            del cfg['MODEL']['RETINANET']['ANCHOR_ASPECT_RATIOS']
            del cfg['MODEL']['RETINANET']['ANCHOR_STRIDES']

    @classmethod
    def downgrade(cls, cfg: CN) -> None:
        super().downgrade(cfg)
        # Restore the anchor settings under MODEL.RPN and duplicate them
        # into MODEL.RETINANET, which held copies in the old layout.
        _rename(cfg, 'MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS', 'MODEL.RPN.ANCHOR_ASPECT_RATIOS')
        _rename(cfg, 'MODEL.ANCHOR_GENERATOR.SIZES', 'MODEL.RPN.ANCHOR_SIZES')
        cfg.MODEL.RETINANET.ANCHOR_ASPECT_RATIOS = cfg.MODEL.RPN.ANCHOR_ASPECT_RATIOS
        cfg.MODEL.RETINANET.ANCHOR_SIZES = cfg.MODEL.RPN.ANCHOR_SIZES
        cfg.MODEL.RETINANET.ANCHOR_STRIDES = []
|
class CfgNode(_CfgNode):
    """
    The same as `fvcore.common.config.CfgNode`, but different in:

    1. Use unsafe yaml loading by default.
       Note that this may lead to arbitrary code execution: you must not
       load a config file from untrusted sources before manually inspecting
       the content of the file.
    2. Support config versioning.
       When attempting to merge an old config, it will convert the old config automatically.
    """

    def merge_from_file(self, cfg_filename: str, allow_unsafe: bool=True) -> None:
        """
        Merge a yaml config file into this (latest-version) config,
        automatically upgrading the loaded config if it is from an older
        version.
        """
        # Fixed: the assertion message was a plain string, so the filename
        # was never interpolated; it is now an f-string.
        assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
        loaded_cfg = _CfgNode.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
        loaded_cfg = type(self)(loaded_cfg)
        # Local import; .defaults itself imports this module.
        from .defaults import _C
        latest_ver = _C.VERSION
        assert (latest_ver == self.VERSION), 'CfgNode.merge_from_file is only allowed on a config object of latest version!'
        logger = logging.getLogger(__name__)
        loaded_ver = loaded_cfg.get('VERSION', None)
        if (loaded_ver is None):
            from .compat import guess_version
            loaded_ver = guess_version(loaded_cfg, cfg_filename)
        assert (loaded_ver <= self.VERSION), 'Cannot merge a v{} config into a v{} config.'.format(loaded_ver, self.VERSION)
        if (loaded_ver == self.VERSION):
            self.merge_from_other_cfg(loaded_cfg)
        else:
            # Version mismatch: downgrade self to the loaded version, merge
            # there, then upgrade the merged result back to the latest.
            from .compat import upgrade_config, downgrade_config
            logger.warning("Loading an old v{} config file '{}' by automatically upgrading to v{}. See docs/CHANGELOG.md for instructions to update your files.".format(loaded_ver, cfg_filename, self.VERSION))
            old_self = downgrade_config(self, to_version=loaded_ver)
            old_self.merge_from_other_cfg(loaded_cfg)
            new_config = upgrade_config(old_self)
            self.clear()
            self.update(new_config)

    def dump(self, *args, **kwargs):
        """
        Returns:
            str: a yaml string representation of the config
        """
        return super().dump(*args, **kwargs)
|
def get_cfg() -> CfgNode:
    """
    Get a copy of the default config.

    Returns:
        a detectron2 CfgNode instance.
    """
    # Import lazily to avoid a circular import at module load time.
    from .defaults import _C

    default_config = _C
    return default_config.clone()
|
def set_global_cfg(cfg: CfgNode) -> None:
    """
    Let the global config point to the given cfg.

    After calling `set_global_cfg(cfg)`, any key of `cfg` can be read via:

    .. code-block:: python

        from detectron2.config import global_cfg
        print(global_cfg.KEY)

    Using a hacky global config lets you access these values anywhere without
    threading the config object deep into the code. Intended only for quick
    prototyping / research exploration.
    """
    global global_cfg
    # Replace the contents in place so existing references stay valid.
    global_cfg.clear()
    global_cfg.update(cfg)
|
def configurable(init_func):
    """
    Decorate a class's __init__ method so that it can be called with a CfgNode
    object using the class's from_config classmethod.

    Examples:

    .. code-block:: python

        class A:
            @configurable
            def __init__(self, a, b=2, c=3):
                pass

            @classmethod
            def from_config(cls, cfg):
                # Returns kwargs to be passed to __init__
                return {"a": cfg.A, "b": cfg.B}

        a1 = A(a=1, b=2)  # regular construction
        a2 = A(cfg)       # construct with a cfg
        a3 = A(cfg, b=3, c=4)  # construct with extra overwrite
    """
    assert (init_func.__name__ == '__init__'), '@configurable should only be used for __init__!'
    if init_func.__module__.startswith('detectron2.'):
        # BUG FIX: the message was a plain string, so '{init_func}' never
        # interpolated; it is now an f-string.
        assert ((init_func.__doc__ is not None) and ('experimental' in init_func.__doc__)), f'configurable {init_func} should be marked experimental'

    @functools.wraps(init_func)
    def wrapped(self, *args, **kwargs):
        try:
            from_config_func = type(self).from_config
        except AttributeError:
            raise AttributeError("Class with @configurable must have a 'from_config' classmethod.")
        if (not inspect.ismethod(from_config_func)):
            raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
        # A CfgNode argument routes construction through from_config;
        # otherwise this behaves like a plain constructor call.
        if _called_with_cfg(*args, **kwargs):
            explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
            init_func(self, **explicit_args)
        else:
            init_func(self, *args, **kwargs)
    return wrapped
|
def _get_args_from_config(from_config_func, *args, **kwargs):
'\n Use `from_config` to obtain explicit arguments.\n\n Returns:\n dict: arguments to be used for cls.__init__\n '
signature = inspect.signature(from_config_func)
if (list(signature.parameters.keys())[0] != 'cfg'):
raise TypeError("{from_config_func.__self__}.from_config must take 'cfg' as the first argument!")
support_var_arg = any(((param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]) for param in signature.parameters.values()))
if support_var_arg:
ret = from_config_func(*args, **kwargs)
else:
supported_arg_names = set(signature.parameters.keys())
extra_kwargs = {}
for name in list(kwargs.keys()):
if (name not in supported_arg_names):
extra_kwargs[name] = kwargs.pop(name)
ret = from_config_func(*args, **kwargs)
ret.update(extra_kwargs)
return ret
|
def _called_with_cfg(*args, **kwargs):
    """
    Returns:
        bool: whether the arguments contain a CfgNode and should be
        forwarded to from_config.
    """
    if args and isinstance(args[0], _CfgNode):
        return True
    # A keyword 'cfg' that is a CfgNode also triggers from_config routing.
    return isinstance(kwargs.pop('cfg', None), _CfgNode)
|
def list_to_dict(list):
    """Convert a list into a dict keyed by the stringified element index.

    :param list: input list
    :return: dict mapping '0', '1', ... to the corresponding elements
    """
    return dict(zip(map(str, range(len(list))), list))
|
class BaseDataset(Dataset):
    """Base dataset that reads, resizes, pads and normalises image/target pairs.

    Subclasses must implement :meth:`create_sample_list` to fill
    ``self.samples`` with dicts describing each sample (image paths,
    target paths and metadata).
    """
    def __init__(self, root, mode='train', resize_mode=None, resize_shape=None):
        # ResizeMode wraps/validates the resize-mode string.
        self.resize_mode = ResizeMode(resize_mode)
        self.resize_shape = resize_shape
        self.mode = mode
        self.root = root
        self.samples = []
        # Subclass hook: populates self.samples.
        self.create_sample_list()
    def normalise(self, tensors):
        """Scale uint8 images into float32 in [0, 1]; targets are untouched."""
        tensors['images'] = (tensors['images'].astype(np.float32) / 255.0)
        return tensors
    def is_train(self):
        """Return True when the dataset is in training mode."""
        return (self.mode == 'train')
    def pad_tensors(self, tensors_resized):
        """Zero-pad every (H, W, C) tensor so H and W become multiples of 32.

        Padding is split as evenly as possible between the two sides of each
        axis (the extra pixel, if any, goes to the bottom/right).
        """
        (h, w) = tensors_resized['images'].shape[:2]
        new_h = (((h + 32) - (h % 32)) if ((h % 32) > 0) else h)
        new_w = (((w + 32) - (w % 32)) if ((w % 32) > 0) else w)
        (lh, uh) = (((new_h - h) / 2), (((new_h - h) / 2) + ((new_h - h) % 2)))
        (lw, uw) = (((new_w - w) / 2), (((new_w - w) / 2) + ((new_w - w) % 2)))
        (lh, uh, lw, uw) = (int(lh), int(uh), int(lw), int(uw))
        padded_tensors = {}
        for (key, tensor) in tensors_resized.items():
            # Promote 2-D tensors (e.g. masks) to a trailing channel axis.
            if (tensor.ndim == 2):
                tensor = tensor[(..., None)]
            assert (tensor.ndim == 3)
            padded_tensors[key] = np.pad(tensor, ((lh, uh), (lw, uw), (0, 0)), mode='constant')
        return padded_tensors
    def read_sample(self, sample):
        """Load, resize and stack the images/targets described by *sample*.

        Returns a dict with stacked 'images' and 'targets' arrays plus every
        other sample key wrapped in a list.
        """
        images = self.read_image(sample)
        targets = self.read_target(sample)
        images_resized = []
        targets_resized = []
        for (im, t) in zip(images, targets):
            data = {'image': im, 'mask': t}
            data = resize(data, self.resize_mode, self.resize_shape)
            images_resized += [data['image']]
            targets_resized += [data['mask']]
        images = np.stack(images_resized)
        targets = np.stack(targets_resized)
        data = {'images': images, 'targets': targets}
        # Carry over remaining metadata, accumulating values into lists.
        for (key, val) in sample.items():
            if (key in ['images', 'targets']):
                continue
            if (key in data):
                data[key] += [val]
            else:
                data[key] = [val]
        return data
    def read_target(self, sample):
        """Lazily read target masks as palettised uint8 arrays."""
        return map((lambda x: np.array(Image.open(x).convert('P'), dtype=np.uint8)), sample['targets'])
    def read_image(self, sample):
        """Lazily read the sample's image files."""
        return map(imread, sample['images'])
    def __len__(self):
        return len(self.samples)
    def __getitem__(self, idx):
        # NOTE(review): this base implementation indexes keys 'img1', 'img2'
        # and 'flow', which read_sample/pad_tensors as written above do not
        # produce ('images'/'targets'); it appears tailored to an optical-flow
        # dataset and is presumably overridden by subclasses — confirm before
        # relying on it.
        sample = self.samples[idx]
        tensors_resized = self.read_sample(sample)
        padded_tensors = self.pad_tensors(tensors_resized)
        padded_tensors = self.normalise(padded_tensors)
        return {'images': [np.transpose(padded_tensors['img1'], (2, 0, 1)).astype(np.float32), np.transpose(padded_tensors['img2'], (2, 0, 1)).astype(np.float32)], 'target': {'flow': np.transpose(padded_tensors['flow'], (2, 0, 1)).astype(np.float32)}, 'info': {}}
    @abstractmethod
    def create_sample_list(self):
        """Populate self.samples; must be implemented by subclasses."""
        pass
|
class VideoDataset(BaseDataset):
    """Base dataset for video clips: 4-D (T, H, W, C) tensors with a temporal
    window of ``tw`` frames per sample.
    """
    def __init__(self, root, mode='train', resize_mode=None, resize_shape=None, tw=8, max_temporal_gap=8, num_classes=2):
        # tw: number of frames per clip; max_temporal_gap bounds how far
        # apart support frames may be sampled.
        self.tw = tw
        self.max_temporal_gap = max_temporal_gap
        self.num_classes = num_classes
        self.videos = []
        self.num_frames = {}
        self.num_objects = {}
        self.shape = {}
        self.current_video = None
        self.start_index = None
        super(VideoDataset, self).__init__(root, mode, resize_mode, resize_shape)
    def set_video_id(self, video):
        """Select *video* as the current sequence for iteration."""
        self.current_video = video
        self.start_index = self.get_start_index(video)
    def get_video_ids(self):
        """Return video ids, shuffled during training."""
        return (random.sample(self.videos, len(self.videos)) if self.is_train() else self.videos)
    def get_start_index(self, video):
        # Always start at frame 0; subclasses may override.
        start_frame = 0
        return start_frame
    def pad_tensors(self, tensors_resized):
        """Zero-pad (T, H, W, C) tensors so H and W become multiples of 32.

        Also records the applied padding in ``tensors['info'][0]['pad']``.
        NOTE(review): assumes tensors_resized contains an 'info' list with at
        least one dict — confirm against read_sample's output.
        """
        (h, w) = tensors_resized['images'].shape[1:3]
        new_h = (((h + 32) - (h % 32)) if ((h % 32) > 0) else h)
        new_w = (((w + 32) - (w % 32)) if ((w % 32) > 0) else w)
        (lh, uh) = (((new_h - h) / 2), (((new_h - h) / 2) + ((new_h - h) % 2)))
        (lw, uw) = (((new_w - w) / 2), (((new_w - w) / 2) + ((new_w - w) % 2)))
        (lh, uh, lw, uw) = (int(lh), int(uh), int(lw), int(uw))
        padded_tensors = tensors_resized.copy()
        keys = ['images', 'targets']
        for key in keys:
            pt = []
            t = tensors_resized[key]
            # Promote (T, H, W) tensors to a trailing channel axis.
            if (t.ndim == 3):
                t = t[(..., None)]
            assert (t.ndim == 4)
            padded_tensors[key] = np.pad(t, ((0, 0), (lh, uh), (lw, uw), (0, 0)), mode='constant')
        padded_tensors['info'][0]['pad'] = ((lh, uh), (lw, uw))
        return padded_tensors
    def __getitem__(self, idx):
        """Return a clip as channel-first (C, T, H, W) float32 arrays."""
        sample = self.samples[idx]
        tensors_resized = self.read_sample(sample)
        padded_tensors = self.pad_tensors(tensors_resized)
        padded_tensors = self.normalise(padded_tensors)
        return {'images': np.transpose(padded_tensors['images'], (3, 0, 1, 2)).astype(np.float32), 'target': {'mask': np.transpose(padded_tensors['targets'], (3, 0, 1, 2)).astype(np.float32)}, 'info': padded_tensors['info']}
    @abstractmethod
    def get_support_indices(self, index, sequence):
        """Return the frame indices forming the clip anchored at *index*."""
        pass
|
class Davis(VideoDataset):
    """DAVIS 2017 (unsupervised annotations) video dataset."""
    def __init__(self, root, mode='train', resize_mode=None, resize_shape=None, tw=8, max_temporal_gap=8, num_classes=2, imset=None):
        # imset: optional image-set file overriding the default train/val split.
        self.imset = imset
        self.videos = []
        self.num_frames = {}
        self.num_objects = {}
        self.shape = {}
        self.raw_samples = []
        super(Davis, self).__init__(root, mode, resize_mode, resize_shape, tw, max_temporal_gap, num_classes)
    def filter_samples(self, video):
        """Restrict self.samples to the samples of *video* only."""
        filtered_samples = [s for s in self.raw_samples if (s[INFO]['video'] == video)]
        self.samples = filtered_samples
    def set_video_id(self, video):
        """Select *video* and filter the sample list accordingly."""
        self.current_video = video
        self.start_index = self.get_start_index(video)
        self.filter_samples(video)
    def get_video_ids(self):
        """Return video ids, shuffled during training."""
        return (random.sample(self.videos, len(self.videos)) if self.is_train() else self.videos)
    def get_support_indices(self, index, sequence):
        """Sample up to ``tw`` support frame indices at/after *index*.

        Training may look further ahead (up to max(max_temporal_gap, tw));
        evaluation uses a contiguous window of at most ``tw`` frames. If fewer
        than ``tw`` distinct frames are available, *index* is repeated to pad.
        """
        if self.is_train():
            index_range = np.arange(index, min(self.num_frames[sequence], (index + max(self.max_temporal_gap, self.tw))))
        else:
            index_range = np.arange(index, min(self.num_frames[sequence], (index + self.tw)))
        support_indices = np.random.choice(index_range, min(self.tw, len(index_range)), replace=False)
        support_indices = np.sort(np.append(support_indices, np.repeat([index], (self.tw - len(support_indices)))))
        return support_indices
    def create_sample_list(self):
        """Scan the DAVIS directory layout and build one sample per frame."""
        image_dir = os.path.join(self.root, 'JPEGImages', '480p')
        mask_dir = os.path.join(self.root, 'Annotations_unsupervised', '480p')
        if self.is_train():
            _imset_f = '2017/train.txt'
        elif self.imset:
            _imset_f = self.imset
        else:
            _imset_f = '2017/val.txt'
        with open(os.path.join(self.root, 'ImageSets', _imset_f), 'r') as lines:
            for line in lines:
                _video = line.rstrip('\n')
                self.videos += [_video]
                img_list = list(glob.glob(os.path.join(image_dir, _video, '*.jpg')))
                img_list.sort()
                num_frames = len(glob.glob(os.path.join(image_dir, _video, '*.jpg')))
                self.num_frames[_video] = num_frames
                _mask_file = os.path.join(mask_dir, _video, '00000.png')
                # The first-frame mask determines object count and frame shape
                # (object ids are consecutive, so max pixel value == count).
                _mask = np.array(Image.open(os.path.join(mask_dir, _video, '00000.png')).convert('P'))
                num_objects = np.max(_mask)
                self.num_objects[_video] = num_objects
                self.shape[_video] = np.shape(_mask)
                for (i, img) in enumerate(img_list):
                    sample = {INFO: {}, IMAGES_: [], TARGETS: []}
                    support_indices = self.get_support_indices(i, _video)
                    sample[INFO]['support_indices'] = support_indices
                    # DAVIS frames are zero-padded 5-digit filenames.
                    images = [os.path.join(image_dir, _video, '{:05d}.jpg'.format(s)) for s in np.sort(support_indices)]
                    targets = [os.path.join(mask_dir, _video, '{:05d}.png'.format(s)) for s in np.sort(support_indices)]
                    sample[IMAGES_] = images
                    sample[TARGETS] = targets
                    sample[INFO]['video'] = _video
                    sample[INFO]['num_frames'] = num_frames
                    sample[INFO]['num_objects'] = num_objects
                    sample[INFO]['shape'] = np.shape(_mask)
                    self.samples += [sample]
        # Keep the unfiltered list so filter_samples can be re-applied.
        self.raw_samples = self.samples
|
class FBMSDataset(Davis):
    """FBMS-59 dataset; only a subset of frames carries ground-truth masks."""
    def __init__(self, root, mode='train', resize_mode=None, resize_shape=None, tw=8, max_temporal_gap=8, num_classes=2):
        # index_length: digit count of the frame number in each sequence's
        # filenames; gt_frames: frame ids that have annotations.
        self.index_length = {}
        self.gt_frames = {}
        self.video_frames = {}
        super(FBMSDataset, self).__init__(root, mode, resize_mode, resize_shape, tw, max_temporal_gap, num_classes)
    def get_support_indices(self, index, sequence):
        """Sample up to ``tw`` support frame ids at/after *index* (see Davis)."""
        if self.is_train():
            index_range = np.arange(index, min(self.num_frames[sequence], (index + max(self.max_temporal_gap, self.tw))))
        else:
            # FBMS frame ids may not start at 0, so bound by the largest id.
            index_range = np.arange(index, min((max(self.video_frames[sequence]) + 1), (index + self.tw)))
        support_indices = np.random.choice(index_range, min(self.tw, len(index_range)), replace=False)
        support_indices = np.sort(np.append(support_indices, np.repeat([index], (self.tw - len(support_indices)))))
        return support_indices
    def read_target(self, sample):
        """Read binary masks; frames without annotation get an all-zero mask.

        NOTE(review): mask_void is computed but never returned, and the
        ``raw_mask == 255`` check is always false after the ``!= 0`` threshold
        above it — looks like leftover void-label handling; confirm intent.
        """
        masks = []
        for t in sample[TARGETS]:
            if os.path.exists(t):
                raw_mask = np.array(Image.open(t).convert('P'), dtype=np.uint8)
                raw_mask = (raw_mask != 0).astype(np.uint8)
                mask_void = (raw_mask == 255).astype(np.uint8)
                raw_mask[(raw_mask == 255)] = 0
            else:
                raw_mask = np.zeros(sample[INFO]['shape']).astype(np.uint8)
                mask_void = (np.ones_like(raw_mask) * 255).astype(np.uint8)
            masks += [raw_mask]
        return masks
    def create_sample_list(self):
        """Scan the FBMS directory layout and build one sample per frame."""
        subset = ('train' if self.is_train() else 'test')
        mask_dir = os.path.join(self.root, 'inst', subset)
        subset = ('Trainingset' if self.is_train() else 'Testset')
        image_dir = os.path.join(self.root, subset)
        videos = glob.glob((image_dir + '/*'))
        for _video in videos:
            sequence = _video.split('/')[(- 1)]
            self.videos.append(sequence)
            vid_files = glob.glob(os.path.join(image_dir, sequence, '*.jpg'))
            shape = imread(vid_files[0]).shape[:2]
            # Frame numbers are the suffix after the last '_'; remember how
            # many digits they use so paths can be rebuilt below.
            self.index_length[sequence] = len(vid_files[0].split('/')[(- 1)].split('.')[0].split('_')[(- 1)])
            self.gt_frames[sequence] = [int(f.split('/')[(- 1)].split('_')[(- 1)].split('.')[0]) for f in glob.glob(os.path.join(mask_dir, sequence, '*.png'))]
            self.num_frames[sequence] = len(vid_files)
            self.video_frames[sequence] = [int(f.split('/')[(- 1)].split('_')[(- 1)].split('.')[0]) for f in vid_files]
            for _f in vid_files:
                sample = {INFO: {}, IMAGES_: [], TARGETS: []}
                index = int(os.path.splitext(os.path.basename(_f))[0].split('_')[(- 1)])
                support_indices = self.get_support_indices(index, sequence)
                sample[INFO]['support_indices'] = support_indices
                l = self.index_length[sequence]
                # Rebuild '<seq>_<zero-padded id>.jpg/.png' paths.
                images = [os.path.join(image_dir, sequence, (sequence + (('_{:0' + str(l)) + 'd}.jpg').format(s))) for s in np.sort(support_indices)]
                targets = [os.path.join(mask_dir, sequence, (sequence + (('_{:0' + str(l)) + 'd}.png').format(s))) for s in np.sort(support_indices)]
                sample[IMAGES_] = images
                sample[TARGETS] = targets
                sample[INFO]['video'] = sequence
                sample[INFO]['num_frames'] = len(vid_files)
                sample[INFO]['num_objects'] = 1
                sample[INFO]['shape'] = shape
                sample[INFO]['gt_frames'] = self.gt_frames[sequence]
                self.samples += [sample]
        # Keep the unfiltered list so filter_samples can be re-applied.
        self.raw_samples = self.samples
|
def load_augmentors(args, pascal_voc_path):
    """Load the augmentation resources requested in *args*.

    Args:
        args: iterable of augmentation names (or None to disable augmentation).
        pascal_voc_path: root of the Pascal VOC data used to build occluders.

    Returns:
        dict mapping resource name (e.g. 'occluders') to the loaded resource,
        or None when *args* is None. This matches what ``augment`` expects as
        its ``aug_classes`` argument.
    """
    if (args is None):
        return None
    # BUG FIX: the original did `augmentors = []; augmentors += {...}`, which
    # extends the list with the dict's *keys* and silently discards the
    # loaded occluders. Build the name -> resource mapping instead.
    augmentors = {}
    if ('occ' in args):
        augmentors['occluders'] = load_occluders(pascal_voc_path)
    return augmentors
|
def augment(augmentors, aug_classes, tensors):
    """Apply the configured augmentations to a shallow copy of *tensors*.

    augmentors lists the enabled augmentation names; aug_classes holds the
    corresponding loaded resources.
    """
    result = tensors.copy()
    if ('occ' in augmentors):
        # Occlusion augmentation needs pre-loaded occluder objects.
        assert ('occluders' in aug_classes)
        result = do_occ_aug(aug_classes['occluders'], result)
    return result
|
def do_occ_aug(occluders, tensors, p=0.2):
    """With probability *p*, occlude a copy of *tensors* using *occluders*."""
    result = tensors.copy()
    apply_occlusion = np.random.choice([True, False], 1, p=[p, (1 - p)])
    if apply_occlusion:
        result = occlude_with_objects(result, occluders)
    return result
|
def import_submodules(package_name):
    """Recursively import every submodule of the already-imported package
    *package_name*, descending into sub-packages."""
    package = sys.modules[package_name]
    for (importer, name, is_package) in pkgutil.walk_packages(package.__path__):
        # Ignore modules discovered outside this package's own path
        # (walk_packages can yield entries from overlapping search paths).
        if (not importer.path.startswith(package.__path__[0])):
            continue
        qualified_name = ((package_name + '.') + name)
        importlib.import_module(qualified_name)
        if is_package:
            import_submodules(qualified_name)
|
def generate_clip_from_image(raw_frame, raw_mask, temporal_window, **kwargs):
    """Synthesise a video clip from a single image by chained augmentation.

    :param raw_frame: The frame to be augmented: h x w x 3
    :param raw_mask: h x w x 1
    :param temporal_window: Number of frames in the output clip
    :param kwargs: optional 'translation', 'rotation', 'shear' overrides for
        the module-level augmentation magnitudes (note: these overwrite the
        globals for subsequent calls too).
    :return: clip_frames - list of frames with values 0-255
             clip_masks - corresponding masks
    """
    global TRANSLATION, ROTATION, SHEAR
    if ('translation' in kwargs):
        TRANSLATION = kwargs['translation']
    if ('rotation' in kwargs):
        ROTATION = kwargs['rotation']
    if ('shear' in kwargs):
        SHEAR = kwargs['shear']
    # Pre-fill the clip with copies of the source; frame 0 stays unaugmented.
    clip_frames = np.repeat(raw_frame[np.newaxis], temporal_window, axis=0)
    clip_masks = np.repeat(raw_mask[np.newaxis], temporal_window, axis=0)
    sometimes = (lambda aug: iaa.Sometimes(0.05, aug))
    blur = sometimes(iaa.OneOf([iaa.GaussianBlur((0.0, 0.5))]))
    seq = iaa.Sequential([sometimes(iaa.ElasticTransformation(alpha=(200, 220), sigma=(17.0, 19.0))), iaa.Affine(scale={'x': (0.7, 1.3), 'y': (0.7, 1.3)}, translate_percent={'x': ((- TRANSLATION), TRANSLATION), 'y': ((- TRANSLATION), TRANSLATION)}, rotate=((- ROTATION), ROTATION), shear=((- SHEAR), SHEAR), mode='edge')], random_order=True)
    frame_aug = raw_frame[np.newaxis]
    mask_aug = raw_mask[np.newaxis]
    # Apply the augmenter cumulatively so motion compounds frame to frame.
    for t in range((temporal_window - 1)):
        (frame_aug, mask_aug) = seq(images=frame_aug.astype(np.uint8), segmentation_maps=mask_aug.astype(np.uint8))
        frame_aug = blur(images=frame_aug)
        clip_frames[(t + 1)] = frame_aug[0]
        clip_masks[(t + 1)] = mask_aug[0]
    return (clip_frames, clip_masks)
|
class VisalDataset(Davis):
    """ViSal video saliency dataset; ground truth exists only for some frames.

    Relies on the module-level SEQ_NAMES list for the sequence names.
    """
    def __init__(self, root, mode='train', resize_mode=None, resize_shape=None, tw=8, max_temporal_gap=8, num_classes=2, imset=None):
        self.gt_frames = {}
        self.video_frames = {}
        super(VisalDataset, self).__init__(root, mode, resize_mode, resize_shape, tw, max_temporal_gap, num_classes)
    def get_current_sequence(self, img_file):
        """Derive the sequence name from the image path (parent directory)."""
        sequence = img_file.split('/')[(- 2)]
        return sequence
    def get_support_indices(self, index, sequence):
        """Sample up to ``tw`` support frame indices at/after *index* (see Davis)."""
        if self.is_train():
            index_range = np.arange(index, min(self.num_frames[sequence], (index + max(self.max_temporal_gap, self.tw))))
        else:
            index_range = np.arange(index, min(self.num_frames[sequence], (index + self.tw)))
        support_indices = np.random.choice(index_range, min(self.tw, len(index_range)), replace=False)
        support_indices = np.sort(np.append(support_indices, np.repeat([index], (self.tw - len(support_indices)))))
        return support_indices
    def read_target(self, sample):
        """Read binary masks; frames without annotation get an all-zero mask.

        NOTE(review): as in FBMSDataset, mask_void is computed but unused and
        the ``== 255`` check is dead after the ``!= 0`` threshold — confirm.
        """
        masks = []
        for t in sample[TARGETS]:
            if os.path.exists(t):
                raw_mask = np.array(Image.open(t).convert('P'), dtype=np.uint8)
                raw_mask = (raw_mask != 0).astype(np.uint8)
                mask_void = (raw_mask == 255).astype(np.uint8)
                raw_mask[(raw_mask == 255)] = 0
            else:
                raw_mask = np.zeros(sample[INFO]['shape']).astype(np.uint8)
                mask_void = (np.ones_like(raw_mask) * 255).astype(np.uint8)
            masks += [raw_mask]
        return masks
    def create_sample_list(self):
        """Scan the ViSal layout: images per sequence directory, all ground
        truth files flat in one directory, matched back by filename prefix."""
        image_dir = os.path.join(self.root, 'ViSal')
        mask_dir = os.path.join(self.root, 'GroundTruth')
        assert os.path.exists(image_dir), 'Images directory not found at expected path: {}'.format(image_dir)
        assert os.path.exists(mask_dir), 'Ground truth directory not found at expected path: {}'.format(mask_dir)
        types = ('/*.bmp', '/*.png')
        mask_fnames = []
        for type in types:
            mask_fnames += sorted(glob.glob((mask_dir + type)))
        mask_fnames = [fname.split('/')[(- 1)] for fname in mask_fnames]
        for _video in SEQ_NAMES:
            self.videos.append(_video)
            seq_images_dir = os.path.join(image_dir, _video)
            assert os.path.exists(seq_images_dir), 'Images directory not found at expected path: {}'.format(seq_images_dir)
            print('Reading Sequence {}'.format(_video))
            # These two sequence names contain digits/underscores that would
            # otherwise collide with other prefixes, hence the looser pattern.
            if (_video in ('gokart', 'snow_leopards')):
                regex_pattern = (_video + '[^a-zA-Z]')
            else:
                regex_pattern = (_video + '[^a-zA-Z0-9]')
            vid_files = []
            for type in types:
                vid_files += sorted(glob.glob((seq_images_dir + type)))
            seq_mask_fnames = sorted(filter((lambda f: re.match(regex_pattern, f)), mask_fnames))
            # Frames whose filename also appears among the masks are annotated.
            self.gt_frames[_video] = [int(i) for (i, f) in enumerate(vid_files) if (f.split('/')[(- 1)] in seq_mask_fnames)]
            assert (len(self.gt_frames[_video]) == len(seq_mask_fnames))
            self.num_frames[_video] = len(vid_files)
            self.video_frames[_video] = vid_files
            shape = imread(vid_files[0]).shape[:2]
            for (i, _f) in enumerate(vid_files):
                sample = {INFO: {}, IMAGES_: [], TARGETS: []}
                sequence = self.get_current_sequence(_f)
                index = self.video_frames[sequence].index(_f)
                support_indices = self.get_support_indices(index, _video)
                sample[INFO]['support_indices'] = support_indices
                images = [self.video_frames[_video][s] for s in np.sort(support_indices)]
                targets = [os.path.join(mask_dir, img_file.split('/')[(- 1)]) for img_file in images]
                sample[IMAGES_] = images
                sample[TARGETS] = targets
                sample[INFO]['video'] = _video
                sample[INFO]['num_frames'] = len(vid_files)
                sample[INFO]['num_objects'] = 1
                sample[INFO]['shape'] = shape
                sample[INFO]['gt_frames'] = self.gt_frames[_video]
                self.samples += [sample]
        # Keep the unfiltered list so filter_samples can be re-applied.
        self.raw_samples = self.samples
|
class YoutubeVOS(VideoDataset):
    """YouTube-VOS video dataset (prefers CleanedAnnotations when present)."""
    def __init__(self, root, mode='train', resize_mode=None, resize_shape=None, tw=8, max_temporal_gap=8, num_classes=2):
        self.videos = []
        self.num_frames = {}
        self.num_objects = {}
        self.shape = {}
        self.raw_samples = []
        # video_frames maps video id -> sorted list of integer frame ids
        # (YouTube-VOS frame ids are not necessarily contiguous).
        self.video_frames = {}
        super(YoutubeVOS, self).__init__(root, mode, resize_mode, resize_shape, tw, max_temporal_gap, num_classes)
    def filter_samples(self, video):
        """Restrict self.samples to the samples of *video* only."""
        filtered_samples = [s for s in self.raw_samples if (s[INFO]['video'] == video)]
        self.samples = filtered_samples
    def get_support_indices(self, index, sequence):
        """Sample up to ``tw`` support frame ids from the window of at most
        ``max_temporal_gap`` frames starting at *index*; pad with *index* when
        fewer frames are available."""
        sample_list = self.video_frames[sequence]
        sample_list.sort()
        start_index = sample_list.index(index)
        end_index = min(len(sample_list), (start_index + self.max_temporal_gap))
        sample_list = sample_list[start_index:end_index]
        support_indices = np.random.choice(sample_list, min(self.tw, len(sample_list)), replace=False)
        support_indices = np.sort(np.append(support_indices, np.repeat([index], (self.tw - len(support_indices)))))
        support_indices.sort()
        # BUG FIX: np.int was deprecated and removed in NumPy 1.24; the
        # builtin int is the exact equivalent.
        return support_indices.astype(int)
    def create_sample_list(self):
        """Scan the YouTube-VOS directory layout and build one sample per frame."""
        imset = ('train' if self.is_train() else 'valid')
        image_dir = os.path.join(self.root, imset, 'JPEGImages')
        _videos = glob.glob((image_dir + '/*'))
        for line in _videos:
            _video = line.split('/')[(- 1)]
            self.videos += [_video]
            img_list = list(glob.glob(os.path.join(image_dir, _video, '*.jpg')))
            # Prefer cleaned annotations when available for this video.
            mask_dir = os.path.join(self.root, imset, 'CleanedAnnotations')
            if os.path.exists(os.path.join(mask_dir, _video)):
                mask_list = list(glob.glob(os.path.join(mask_dir, _video, '*.png')))
            else:
                mask_dir = os.path.join(self.root, imset, 'Annotations')
                mask_list = list(glob.glob(os.path.join(mask_dir.replace('CleanedAnnotations', 'Annotations'), _video, '*.png')))
            img_list.sort()
            mask_list.sort()
            self.video_frames[_video] = [int(os.path.splitext(os.path.basename(f))[0]) for f in img_list]
            num_frames = len(img_list)
            self.num_frames[_video] = num_frames
            # BUG FIX: the original took np.shape(...) of the decoded mask and
            # then np.max of that *shape tuple*, so num_objects was the larger
            # image dimension and shape[_video] was (2,). Read the mask pixels,
            # matching the Davis class: object ids are consecutive, so the max
            # pixel value is the object count.
            _mask = np.array(Image.open(mask_list[0]).convert('P'))
            num_objects = np.max(_mask)
            self.num_objects[_video] = num_objects
            self.shape[_video] = np.shape(_mask)
            for f_index in self.video_frames[_video]:
                sample = {INFO: {}, IMAGES_: [], TARGETS: []}
                support_indices = self.get_support_indices(f_index, _video)
                sample[INFO]['support_indices'] = support_indices
                # Frames are zero-padded 5-digit filenames.
                images = [os.path.join(image_dir, _video, '{:05d}.jpg'.format(s)) for s in np.sort(support_indices)]
                targets = [os.path.join(mask_dir, _video, '{:05d}.png'.format(s)) for s in np.sort(support_indices)]
                sample[IMAGES_] = images
                sample[TARGETS] = targets
                sample[INFO]['video'] = _video
                sample[INFO]['num_frames'] = num_frames
                sample[INFO]['num_objects'] = num_objects
                sample[INFO]['shape'] = np.shape(_mask)
                self.samples += [sample]
        # Keep the unfiltered list so filter_samples can be re-applied.
        self.raw_samples = self.samples
|
def get_inference_engine(cfg):
    """Instantiate the inference engine named by cfg.INFERENCE.ENGINE.

    Looks the name up among all subclasses of BaseInferenceEngine.

    Raises:
        ValueError: if no subclass with that name exists.
    """
    # Snapshot the subclass collection once so the name lookup and the
    # instantiation index into the same ordering.
    engines = list(all_subclasses(BaseInferenceEngine))
    try:
        # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt and
        # unrelated errors; only a failed name lookup should be translated.
        class_index = [cls.__name__ for cls in engines].index(cfg.INFERENCE.ENGINE)
    except ValueError:
        raise ValueError('Inference engine {} not found.'.format(cfg.INFERENCE.ENGINE))
    return engines[class_index](cfg)
|
class Trainer():
    """Distributed (apex/amp) training, evaluation and inference driver.

    NOTE(review): several methods read the module-level ``args`` global
    (e.g. ``args.local_rank``, ``args.print_freq``, ``args.wts``) instead of
    ``self.args``; the launcher script must define ``args`` globally.
    """
    def __init__(self, args, port):
        # port: rendezvous port for torch.distributed initialisation.
        cfg = get_cfg()
        cfg.merge_from_file(args.config)
        self.cfg = cfg
        self.port = port
        assert os.path.exists('saved_models'), 'Create a path to save the trained models: <default: ./saved_models> '
        self.model_dir = os.path.join('saved_models', cfg.NAME)
        self.writer = SummaryWriter(log_dir=os.path.join(self.model_dir, 'summary'))
        self.iteration = 0
        print('Arguments used: {}'.format(args), flush=True)
        (self.trainset, self.testset) = get_datasets(cfg)
        self.model = get_model(cfg)
        print('Using model: {}'.format(self.model.__class__), flush=True)
        # NOTE(review): both CUDA branches call the same init_distributed;
        # the single-GPU case simply skips the DDP wrapping inside it.
        if (torch.cuda.is_available() and (torch.cuda.device_count() > 1)):
            (self.model, self.optimiser) = self.init_distributed(cfg)
        elif torch.cuda.is_available():
            (self.model, self.optimiser) = self.init_distributed(cfg)
        else:
            raise RuntimeError('CUDA not available.')
        # start_epoch is set inside init_distributed by load_weightsV2.
        self.lr_schedulers = get_lr_schedulers(self.optimiser, cfg, self.start_epoch)
        self.batch_size = self.cfg.TRAINING.BATCH_SIZE
        args.world_size = 1
        print(args)
        self.args = args
        self.epoch = 0
        self.best_loss_train = math.inf
        self.losses = AverageMeterDict()
        self.ious = AverageMeterDict()
        # NUM_SAMPLES == -1 means "use the full dataset".
        num_samples = (None if (cfg.DATALOADER.NUM_SAMPLES == (- 1)) else cfg.DATALOADER.NUM_SAMPLES)
        if (torch.cuda.device_count() > 1):
            self.train_sampler = torch.utils.data.distributed.DistributedSampler(torch.utils.data.RandomSampler(self.trainset, replacement=True, num_samples=num_samples), shuffle=True)
        else:
            self.train_sampler = (torch.utils.data.RandomSampler(self.trainset, replacement=True, num_samples=num_samples) if (num_samples is not None) else None)
        # DataLoader forbids shuffle=True together with a custom sampler.
        shuffle = (True if (self.train_sampler is None) else False)
        self.trainloader = DataLoader(self.trainset, batch_size=self.batch_size, num_workers=cfg.DATALOADER.NUM_WORKERS, shuffle=shuffle, sampler=self.train_sampler)
        print(summary(self.model, tuple((3, cfg.INPUT.TW, 256, 256)), batch_size=1))
    def init_distributed(self, cfg):
        """Set up syncBN, amp mixed precision, checkpoint restore and (when
        multiple GPUs are present) apex DistributedDataParallel.

        Also sets self.start_epoch / self.iteration from the checkpoint and
        self.world_size from the process group.
        """
        torch.cuda.set_device(args.local_rank)
        init_torch_distributed(self.port)
        model = apex.parallel.convert_syncbn_model(self.model)
        model.cuda()
        optimiser = get_optimiser(model, cfg)
        (model, optimiser, self.start_epoch, self.iteration) = load_weightsV2(model, optimiser, args.wts, self.model_dir)
        # Map config precision names to apex amp optimisation levels.
        opt_levels = {'fp32': 'O0', 'fp16': 'O2', 'mixed': 'O1'}
        if (cfg.TRAINING.PRECISION in opt_levels):
            opt_level = opt_levels[cfg.TRAINING.PRECISION]
        else:
            opt_level = opt_levels['fp32']
            print('WARN: Precision string is not understood. Falling back to fp32')
        (model, optimiser) = amp.initialize(model, optimiser, opt_level=opt_level)
        if (torch.cuda.device_count() > 1):
            model = apex.parallel.DistributedDataParallel(model, delay_allreduce=True)
        self.world_size = torch.distributed.get_world_size()
        print('Intitialised distributed with world size {} and rank {}'.format(self.world_size, args.local_rank))
        return (model, optimiser)
    def train(self):
        """Run one training epoch; returns the dict of average losses."""
        batch_time = AverageMeter()
        data_time = AverageMeter()
        self.model.train()
        self.ious.reset()
        self.losses.reset()
        end = time.time()
        for (i, input_dict) in enumerate(self.trainloader):
            input = input_dict['images']
            target_dict = dict([(k, t.float().cuda()) for (k, t) in input_dict['target'].items()])
            if ('masks_guidance' in input_dict):
                masks_guidance = input_dict['masks_guidance']
                masks_guidance = masks_guidance.float().cuda()
            else:
                masks_guidance = None
            info = input_dict['info']
            data_time.update((time.time() - end))
            input_var = input.float().cuda()
            pred = self.model(input_var, masks_guidance)
            pred = format_pred(pred)
            in_dict = {'input': input_var, 'guidance': masks_guidance}
            loss_dict = compute_loss(in_dict, pred, target_dict, self.cfg)
            total_loss = loss_dict['total_loss']
            self.optimiser.zero_grad()
            # amp scales the loss to avoid fp16 gradient underflow.
            with amp.scale_loss(total_loss, self.optimiser) as scaled_loss:
                scaled_loss.backward()
            self.optimiser.step()
            self.iteration += 1
            # Average losses across ranks so logging is rank-consistent.
            if (torch.cuda.device_count() > 1):
                reduced_loss = dict([(key, reduce_tensor(val, self.world_size).data.item()) for (key, val) in loss_dict.items()])
            else:
                reduced_loss = dict([(key, val.data.item()) for (key, val) in loss_dict.items()])
            self.losses.update(reduced_loss)
            for (k, v) in self.losses.val.items():
                self.writer.add_scalar('loss_{}'.format(k), v, self.iteration)
            if args.show_image_summary:
                show_image_summary(self.iteration, self.writer, in_dict, target_dict, pred)
            torch.cuda.synchronize()
            batch_time.update(((time.time() - end) / args.print_freq))
            end = time.time()
            loss_str = ' '.join(['{}:{:4f}({:4f})'.format(k, self.losses.val[k], self.losses.avg[k]) for (k, v) in self.losses.val.items()])
            if (args.local_rank == 0):
                # NOTE(review): the two throughput values passed positionally
                # have no matching {} slots and are silently ignored by format.
                print('[Iter: {0}]Epoch: [{1}][{2}/{3}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tData Time {data_time.val:.3f} ({data_time.avg:.3f})\tLOSSES - {loss})\t'.format(self.iteration, self.epoch, ((i * self.world_size) * self.batch_size), ((len(self.trainloader) * self.batch_size) * self.world_size), ((self.world_size * self.batch_size) / batch_time.val), ((self.world_size * self.batch_size) / batch_time.avg), batch_time=batch_time, data_time=data_time, loss=loss_str), flush=True)
            # Periodic checkpoint every 10k iterations.
            if ((self.iteration % 10000) == 0):
                if (not os.path.exists(self.model_dir)):
                    os.makedirs(self.model_dir)
                save_name = '{}/{}.pth'.format(self.model_dir, self.iteration)
                save_checkpointV2(self.epoch, self.iteration, self.model, self.optimiser, save_name)
        if (args.local_rank == 0):
            print('Finished Train Epoch {} Loss {losses.avg}'.format(self.epoch, losses=self.losses), flush=True)
        return self.losses.avg
    def eval(self):
        """Evaluate on the test set, one video sequence at a time; returns the
        dict of average losses over all sequences."""
        batch_time = AverageMeter()
        losses = AverageMeterDict()
        count = 0
        self.model.eval()
        end = time.time()
        print('Starting validation for epoch {}'.format(self.epoch), flush=True)
        for seq in self.testset.get_video_ids():
            self.testset.set_video_id(seq)
            if (torch.cuda.device_count() > 1):
                test_sampler = torch.utils.data.distributed.DistributedSampler(self.testset, shuffle=False)
            else:
                test_sampler = None
            testloader = DataLoader(self.testset, batch_size=1, num_workers=1, shuffle=False, sampler=test_sampler, pin_memory=True)
            # Per-video running averages for per-sequence logging.
            losses_video = AverageMeterDict()
            for (i, input_dict) in enumerate(testloader):
                with torch.no_grad():
                    input = input_dict['images']
                    target_dict = dict([(k, t.float().cuda()) for (k, t) in input_dict['target'].items()])
                    if ('masks_guidance' in input_dict):
                        masks_guidance = input_dict['masks_guidance']
                        masks_guidance = masks_guidance.float().cuda()
                    else:
                        masks_guidance = None
                    info = input_dict['info']
                    input_var = input.float().cuda()
                    pred = self.model(input_var, masks_guidance)
                    pred = format_pred(pred)
                    in_dict = {'input': input_var, 'guidance': masks_guidance}
                    loss_dict = compute_loss(in_dict, pred, target_dict, self.cfg)
                    total_loss = loss_dict['total_loss']
                    self.iteration += 1
                    if (torch.cuda.device_count() > 1):
                        reduced_loss = dict([(key, reduce_tensor(val, self.world_size).data.item()) for (key, val) in loss_dict.items()])
                    else:
                        reduced_loss = dict([(key, val.data.item()) for (key, val) in loss_dict.items()])
                    count = (count + 1)
                    losses_video.update(reduced_loss, args.world_size)
                    losses.update(reduced_loss, args.world_size)
                for (k, v) in losses.val.items():
                    self.writer.add_scalar('loss_{}'.format(k), v, self.iteration)
                torch.cuda.synchronize()
                batch_time.update(((time.time() - end) / args.print_freq))
                end = time.time()
                if (args.local_rank == 0):
                    loss_str = ' '.join(['{}:{:4f}({:4f})'.format(k, losses_video.val[k], losses_video.avg[k]) for (k, v) in losses_video.val.items()])
                    print('{0}: [{1}/{2}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tLOSSES - {loss})\t'.format(info[0]['video'], (i * args.world_size), (len(testloader) * args.world_size), batch_time=batch_time, loss=loss_str), flush=True)
        if (args.local_rank == 0):
            loss_str = ' '.join(['{}:{:4f}({:4f})'.format(k, losses.val[k], losses.avg[k]) for (k, v) in losses.val.items()])
            print('Finished Test: Loss --> loss {}'.format(loss_str), flush=True)
        return losses.avg
    def start(self):
        """Dispatch on args.task: 'train' (with periodic eval), 'eval' or 'infer'."""
        if (args.task == 'train'):
            start_epoch = self.epoch
            for epoch in range(start_epoch, self.cfg.TRAINING.NUM_EPOCHS):
                self.epoch = epoch
                # Reshuffle the distributed sampler deterministically per epoch.
                if (self.train_sampler is not None):
                    self.train_sampler.set_epoch(epoch)
                loss_mean = self.train()
                for lr_scheduler in self.lr_schedulers:
                    lr_scheduler.step(epoch)
                if (args.local_rank == 0):
                    print('Total Loss {}'.format(loss_mean))
                    # Keep a rolling best-on-train checkpoint.
                    if (loss_mean['total_loss'] < self.best_loss_train):
                        if (not os.path.exists(self.model_dir)):
                            os.makedirs(self.model_dir)
                        self.best_loss_train = (loss_mean['total_loss'] if (loss_mean['total_loss'] < self.best_loss_train) else self.best_loss_train)
                        save_name = '{}/{}.pth'.format(self.model_dir, 'model_best_train')
                        save_checkpointV2(epoch, self.iteration, self.model, self.optimiser, save_name)
                val_loss = self.eval()
        elif (args.task == 'eval'):
            self.eval()
        elif (args.task == 'infer'):
            inference_engine = get_inference_engine(self.cfg)
            inference_engine.infer(self.testset, self.model)
        else:
            raise ValueError('Unknown task {}'.format(args.task))
    def backup_session(self, signalNumber, _):
        """Signal handler: checkpoint (rank 0, train only), sync all ranks,
        tear down the process group and exit."""
        if (is_main_process() and (self.args.task == 'train')):
            save_name = '{}/{}_{}.pth'.format(self.model_dir, 'checkpoint', self.iteration)
            print('Received signal {}. \nSaving model to {}'.format(signalNumber, save_name))
            save_checkpointV2(self.epoch, self.iteration, self.model, self.optimiser, save_name)
        synchronize()
        cleanup_env()
        exit(1)
|
def register_interrupt_signals(trainer):
    """Install trainer.backup_session as the handler for a broad set of
    interrupt/termination signals so a checkpoint is saved before exit."""
    handled = (
        signal.SIGHUP, signal.SIGINT, signal.SIGQUIT,
        signal.SIGILL, signal.SIGTRAP, signal.SIGABRT,
        signal.SIGBUS, signal.SIGALRM, signal.SIGTERM,
    )
    for sig in handled:
        signal.signal(sig, trainer.backup_session)
|
class DecoderWithEmbedding(Decoder3d):
    """Decoder3d variant that also produces an embedding map via a
    non-local embedding head applied to the finest refined feature level."""

    def __init__(self, n_classes=2, e_dim=64, add_spatial_coord=True):
        super(DecoderWithEmbedding, self).__init__(n_classes)
        self.embedding_head = NonlocalOffsetEmbeddingHead(
            256, 128, e_dim, downsampling_factor=2, add_spatial_coord=add_spatial_coord)

    def forward(self, r5, r4, r3, r2, support):
        # Global-context bottleneck with a two-conv residual refinement.
        gc = self.GC(r5)
        res = self.convG2(F.relu(self.convG1(F.relu(gc))))
        feat5 = gc + res
        # Top-down refinement through the encoder skip connections.
        feat4 = self.RF4(r4, feat5)
        feat3 = self.RF3(r3, feat4)
        feat2 = self.RF2(r2, feat3)
        # Embedding head runs at half the spatial resolution of feat2.
        e = self.embedding_head(
            F.interpolate(F.relu(feat2), scale_factor=(1, 0.5, 0.5), mode='trilinear'))
        p = F.interpolate(self.pred2(F.relu(feat2)), scale_factor=(1, 4, 4), mode='trilinear')
        return (p, e, feat2)
|
class DecoderSegmentEmbedding(DecoderWithEmbedding):
    """DecoderWithEmbedding variant that computes the embedding from the m3
    feature level and feeds a projection of it back into the mask head."""

    def __init__(self, n_classes=2, e_dim=64):
        super(DecoderSegmentEmbedding, self).__init__(n_classes=n_classes, e_dim=e_dim)
        # Projects the e_dim-channel embedding back to the 256-channel decoder space.
        # NOTE(review): padding=1 with kernel_size=1 grows every spatial dim by 2;
        # the later F.interpolate to m2.shape hides this, but padding=0 looks
        # intended — confirm before touching (changes trained-model numerics).
        self.con1x1 = nn.Conv3d(e_dim, 256, kernel_size=1, padding=1)

    def forward(self, r5, r4, r3, r2, support):
        # Global-context bottleneck with residual refinement (same as parent).
        x = self.GC(r5)
        r = self.convG1(F.relu(x))
        r = self.convG2(F.relu(r))
        m5 = (x + r)
        m4 = self.RF4(r4, m5)
        m3 = self.RF3(r3, m4)
        m2 = self.RF2(r2, m3)
        # Unlike the parent, the embedding comes from m3 (temporally upsampled 2x).
        e = self.embedding_head(F.interpolate(m3, scale_factor=(2, 1, 1), mode='trilinear'))
        # Inject the embedding (projected and resized to m2) into the mask features.
        e_unrolled = self.con1x1(F.relu(e))
        p2 = self.pred2((F.relu(m2) + F.interpolate(e_unrolled, m2.shape[2:], mode='trilinear')))
        p = F.interpolate(p2, scale_factor=(1, 4, 4), mode='trilinear')
        return (p, e, m2)
|
class DecoderEmbedding(Decoder3d):
    """Decoder3d whose refinement blocks use transposed-convolution upsampling.

    NOTE(review): e_dim, add_spatial_coord and scale are accepted but never
    used in this class; callers (e.g. Resnet3dEmbeddingMultiDecoder) size the
    output via n_classes instead — confirm before removing them.
    """

    def __init__(self, n_classes=2, e_dim=3, add_spatial_coord=True, scale=0.5):
        super(DecoderEmbedding, self).__init__(n_classes=n_classes)
        # Replace the parent's refinement modules with conv-transpose variants.
        self.RF4 = Refine3dConvTranspose(1024, 256)
        self.RF3 = Refine3dConvTranspose(512, 256)
        self.RF2 = Refine3dConvTranspose(256, 256)
|
class DecoderLight(Decoder3d):
    """Decoder3d using lightweight refinement modules (Refine3dLight),
    optionally with transposed convolutions for upsampling."""

    def __init__(self, n_classes=2, conv_t=False):
        super(DecoderLight, self).__init__(n_classes=n_classes)
        # Swap the parent's refinement blocks for their light counterparts.
        for attr, in_channels in (('RF4', 1024), ('RF3', 512), ('RF2', 256)):
            setattr(self, attr, Refine3dLight(in_channels, 256, conv_t=conv_t))
|
class DecoderMultiClass(Decoder3d):
    """Decoder3d that emits two predictions from one shared decoder trunk:
    a 2-channel foreground map and the inherited multi-class map."""

    def __init__(self, n_classes=2, conv_t=False):
        super(DecoderMultiClass, self).__init__(n_classes=n_classes)
        # Extra head for binary foreground/background prediction.
        self.pred_fg = nn.Conv3d(256, 2, kernel_size=3, padding=1, stride=1)

    def forward(self, r5, r4, r3, r2, support):
        def _up4(t):
            # Upsample 4x spatially, keep the temporal dimension.
            return F.interpolate(t, scale_factor=(1, 4, 4), mode='trilinear')

        # Global-context bottleneck with residual refinement.
        gc = self.GC(r5)
        res = self.convG2(F.relu(self.convG1(F.relu(gc))))
        feat = gc + res
        # Top-down refinement through the skip connections.
        feat = self.RF4(r4, feat)
        feat = self.RF3(r3, feat)
        feat = self.RF2(r2, feat)
        act = F.relu(feat)
        # Return [foreground, multi-class], both at full spatial resolution.
        return [_up4(self.pred_fg(act)), _up4(self.pred2(act))]
|
class MultiScaleDecoder(Decoder3d):
    """Decoder3d that routes all four refined feature scales into a combined
    multi-scale head producing both predictions and embeddings."""

    def __init__(self, n_classes=2, add_spatial_coord=True):
        super(MultiScaleDecoder, self).__init__(n_classes)
        # r5 from this encoder has 2048 channels, so override the parent's convG1.
        self.convG1 = nn.Conv3d(2048, 256, kernel_size=3, padding=1)
        self.embedding_head = MultiscaleCombinedHeadLongTemporalWindow(
            256, n_classes, True, True, seed_map=True, add_spatial_coord=add_spatial_coord)

    def forward(self, r5, r4, r3, r2, support):
        # Unlike the parent, no global-context block: plain conv stack on r5.
        r = self.convG1(F.relu(r5))
        r = self.convG2(F.relu(r))
        m5 = r
        m4 = self.RF4(r4, m5)
        m3 = self.RF3(r3, m4)
        m2 = self.RF2(r2, m3)
        # The head consumes every scale, coarse to fine.
        (p, e) = self.embedding_head.forward([m5, m4, m3, m2])
        return (p, e)
|
class Resnet3dEmbeddingMultiDecoder(Resnet3d):
    """Resnet3d with one shared encoder feeding several decoders; all decoder
    outputs are flattened into a single list of tensors."""

    def __init__(self, tw=8, sample_size=112, e_dim=7, decoders=None):
        super(Resnet3dEmbeddingMultiDecoder, self).__init__(tw=tw, sample_size=sample_size)
        resnet = resnet50_no_ts(sample_size=sample_size, sample_duration=tw)
        self.encoder = Encoder3d(tw, sample_size, resnet=resnet)
        if decoders is None:
            # Default: one segmentation decoder plus one embedding decoder.
            decoders = [Decoder3d(), DecoderEmbedding(n_classes=e_dim)]
        self.decoders = nn.ModuleList(decoders)

    def forward(self, x, ref=None):
        (r5, r4, r3, r2) = self.encoder.forward(x, ref)

        def _flatten(item):
            # Decoders may return a tensor or arbitrarily nested lists/tuples
            # of tensors; collapse everything into one flat list.
            if type(item) is torch.Tensor:
                return [item]
            return reduce(add, [_flatten(sub) for sub in item])

        return _flatten([dec.forward(r5, r4, r3, r2, None) for dec in self.decoders])
|
class Resnet3dChannelSeparated_ir(Resnet3dEmbeddingMultiDecoder):
    """Multi-decoder model using a channel-separated (CSN-iR) encoder.

    NOTE(review): tw and sample_size are not forwarded to the parent ctor, so
    the parent first builds its default encoder (tw=8) which is immediately
    replaced below — wasteful but apparently harmless; confirm intent.
    """

    def __init__(self, tw=16, sample_size=112, e_dim=7, n_classes=2, decoders=None):
        # Default decoder pair: segmentation head + embedding head.
        decoders = ([Decoder3d(n_classes=n_classes), DecoderEmbedding(n_classes=e_dim)] if (decoders is None) else decoders)
        super(Resnet3dChannelSeparated_ir, self).__init__(decoders=decoders)
        self.encoder = Encoder3d_csn_ir(tw, sample_size)
|
class Resnet3dCSNiRSameDecoders(Resnet3dEmbeddingMultiDecoder):
    """CSN-iR multi-decoder model where both decoders share the plain
    Decoder3d architecture (the second sized for the embedding dim)."""

    def __init__(self, tw=16, sample_size=112, e_dim=7):
        same_decoders = [Decoder3d(), Decoder3d(n_classes=e_dim)]
        super(Resnet3dCSNiRSameDecoders, self).__init__(decoders=same_decoders)
        # Replace the default encoder with the channel-separated (ir) variant.
        self.encoder = Encoder3d_csn_ir(tw, sample_size)
|
class Resnet3dCSNiRLight(Resnet3dEmbeddingMultiDecoder):
    """CSN-iR multi-decoder model with lightweight decoders; the embedding
    decoder additionally uses transposed-convolution upsampling."""

    def __init__(self, tw=16, sample_size=112, e_dim=7):
        light_decoders = [DecoderLight(), DecoderLight(n_classes=e_dim, conv_t=True)]
        super(Resnet3dCSNiRLight, self).__init__(decoders=light_decoders)
        # Replace the default encoder with the channel-separated (ir) variant.
        self.encoder = Encoder3d_csn_ir(tw, sample_size)
|
class Resnet3dCSNiRMultiScale(Resnet3d):
    """CSN-iR encoder paired with a single multi-scale decoder."""

    def __init__(self, tw=16, sample_size=112, e_dim=7, add_spatial_coord=True):
        super(Resnet3dCSNiRMultiScale, self).__init__()
        self.encoder = Encoder3d_csn_ir(tw, sample_size)
        self.decoder = MultiScaleDecoder(add_spatial_coord=add_spatial_coord)

    def forward(self, x, ref):
        # Encoder yields the four feature scales (r5, r4, r3, r2).
        features = self.encoder.forward(x, ref)
        return self.decoder.forward(*features, None)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.