repo_name stringclasses 400 values | branch_name stringclasses 4 values | file_content stringlengths 16 72.5k | language stringclasses 1 value | num_lines int64 1 1.66k | avg_line_length float64 6 85 | max_line_length int64 9 949 | path stringlengths 5 103 | alphanum_fraction float64 0.29 0.89 | alpha_fraction float64 0.27 0.89 |
|---|---|---|---|---|---|---|---|---|---|
# Warm-up: predict each result, then check it by printing.
numbers = [3, 1, 4, 1, 5, 9, 2]
# Predicted answers:
#   numbers[0]          -> 3
#   numbers[-1]         -> 2 (last element)
#   numbers[3]          -> 1
#   numbers[:-1]        -> [3, 1, 4, 1, 5, 9] (all but the last)
#   numbers[3:4]        -> [1]
#   5 in numbers        -> True
#   7 in numbers        -> False
#   "3" in numbers      -> False (the list holds ints, not strings)
#   numbers + [6, 5, 3] -> a new list with the extra values on the end
for result in (numbers[0], numbers[-1], numbers[3], numbers[:-1],
               numbers[3:4], 5 in numbers, 7 in numbers, "3" in numbers,
               numbers + [6, 5, 3]):
    print(result)
# Mutate the list, then inspect it again.
numbers[0] = '10'
print(numbers[0])
numbers[-1] = '1'
print(numbers[-1])
print(numbers[2:])
check_number = '9'
if 9 in numbers:
    print("It's in the list")
else:
    print('Not in the list')
"""Shop calculator: read item prices and report each cost plus the total."""

# Bug fix: the original re-prompt on an invalid count stored the raw input()
# string back into `items` (never converting to int and never re-checking),
# so range(items) crashed with a TypeError. Loop until a positive int is given.
items = int(input("Please enter the number of items:"))
while items <= 0:
    print("Invalid number of items")
    items = int(input("Please enter the number of items:"))
prices = []
for item_number in range(1, items + 1):
    item_cost = float(input("What is the price of item {}?: $".format(item_number)))
    prices.append(item_cost)
# Echo each price back, formatted as currency.
for item_number, cost in enumerate(prices, start=1):
    print("The cost of item {} is:".format(item_number))
    print("${:,.2f}".format(cost))
print("The total cost of all items is: $", sum(prices))
| Python | 21 | 24.190475 | 78 | /Prac_1/shop_calculator.py | 0.606285 | 0.595194 |
def main():
    """Prompt for a name, then print every second letter of it."""
    name = get_name()
    print_name(name)


def print_name(name):
    """Print the characters of name at even indexes (0, 2, 4, ...)."""
    print(name[::2])


def get_name():
    """Keep asking until the user enters a purely alphabetic name."""
    while True:
        name = input("What is your name?: ")
        if name.isalpha():
            return name
        print("Sorry, i didn't understand that.")


main()
rlebras/pytorch-pretrained-BERT | refs/heads/master | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from examples.run_squad import _compute_softmax
from pytorch_pretrained_bert import BertForSequenceClassification
from pytorch_pretrained_bert.file_utils import read_jsonl_lines, write_items, TsvIO
from pytorch_pretrained_bert.modeling import BertForMultipleChoice
from pytorch_pretrained_bert.optimization import BertAdam
from pytorch_pretrained_bert.tokenization import printable_text, convert_to_unicode, BertTokenizer
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
    """A single training/test example for simple sequence classification."""
    def __init__(self, guid, text_a, text_b=None, label=None):
        """Constructs a InputExample.
        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second sequence.
            Only must be specified for sequence pair tasks.
            label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
        """
        # Stored verbatim; tokenization happens later in
        # convert_examples_to_features.
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
class InputExampleWithList(object):
    """A single training/test example for simple multiple choice classification."""
    def __init__(self, guid, text_a, text_b, label=None):
        """Constructs a InputExample.
        Args:
            guid: Unique id for the example.
            text_a: list. A list containing untokenized text
            text_b: list. containing untokenized text associated of the same size as text_A
            Only must be specified for multiple choice options.
            label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
        """
        # One entry per answer option; the two lists must line up pairwise.
        assert isinstance(text_a, list)
        assert isinstance(text_b, list)
        assert len(text_a) == len(text_b)
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
        # Always None here; these attributes exist so this class is
        # interchangeable with InputExampleWithListFourFields inside
        # convert_examples_to_features_mc, which probes text_c/text_d.
        self.text_c = None
        self.text_d = None
class InputExampleWithListFourFields(object):
    """A single training/test example for simple multiple choice classification."""
    def __init__(self, guid, text_a, text_b, text_c, text_d, label=None):
        """Constructs a InputExample.
        Args:
            guid: Unique id for the example.
            text_a: list. A list containing untokenized text
            text_b: list. containing untokenized text associated of the same size as text_A
            text_c: list. containing untokenized text associated of the same size as text_A
            text_d: list. containing untokenized text associated of the same size as text_A
            Only must be specified for multiple choice options.
            label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
        """
        # All provided lists must be pairwise-aligned per answer option;
        # text_c and text_d are optional extra segments and may be None.
        assert isinstance(text_a, list)
        assert isinstance(text_b, list)
        assert text_c is None or isinstance(text_c, list)
        assert text_d is None or isinstance(text_d, list)
        assert len(text_a) == len(text_b)
        if text_c is not None:
            assert len(text_c) == len(text_a)
        if text_d is not None:
            assert len(text_d) == len(text_a)
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.text_c = text_c
        self.text_d = text_d
        self.label = label
class InputFeatures(object):
    """A single set of features of data."""
    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        # For single-sequence features these are flat lists of length
        # max_seq_length; for multiple-choice features each is a list of
        # such lists (one per option) — see convert_examples_to_features_mc.
        self.input_ids = input_ids
        self.input_mask = input_mask  # 1 for real tokens, 0 for padding
        self.segment_ids = segment_ids  # segment index per token (0 first, 1+ later)
        self.label_id = label_id  # integer index into the processor's label list
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Read a tab-separated-values file into a list of rows."""
        with open(input_file, "r") as f:
            return list(csv.reader(f, delimiter="\t", quotechar=quotechar))

    @classmethod
    def _read_jsonl(cls, input_file, quotechar=None):
        """Read a JSON-lines file (one JSON object per line) into a list."""
        with open(input_file, "r") as f:
            return [json.loads(line) for line in f]
class AnliProcessor(DataProcessor):
    """Processor for the ANLI data set (two candidate middle sentences)."""
    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.jsonl")))
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "train.jsonl")), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "valid.jsonl")), "dev")
    def get_examples_from_file(self, input_file):
        """Build examples from an arbitrary jsonl file (used for prediction)."""
        return self._create_examples(
            self._read_jsonl(input_file), "to-pred")
    def get_labels(self):
        """See base class."""
        return ["0", "1"]
    def _create_examples(self, records, set_type):
        """Creates examples for the training and dev sets.

        Each record yields one two-option example: text_a holds
        [beginning + option1, beginning + option2] and text_b repeats the
        story ending for both options. 'AnswerRightEnding' is 1-based in
        the data and converted to a 0-based label string here.
        """
        examples = []
        for (i, record) in enumerate(records):
            guid = "%s-%s-%s" % (set_type, record['InputStoryid'], "1")
            beginning = record['InputSentence1']
            ending = record['InputSentence5']
            option1 = record['RandomMiddleSentenceQuiz1']
            option2 = record['RandomMiddleSentenceQuiz2']
            answer = int(record['AnswerRightEnding']) - 1
            option1_context = convert_to_unicode(' '.join([beginning, option1]))
            option2_context = convert_to_unicode(' '.join([beginning, option2]))
            label = convert_to_unicode(str(answer))
            examples.append(
                InputExampleWithListFourFields(guid=guid,
                                               text_a=[option1_context, option2_context],
                                               text_b=[ending, ending],
                                               text_c=None,
                                               text_d=None,
                                               label=label
                                               )
            )
        return examples
    def label_field(self):
        # Name of the jsonl field holding the gold answer (1-based).
        return "AnswerRightEnding"
class AnliProcessor3Option(DataProcessor):
    """Processor for the ANLI data set with three candidate middle sentences."""

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.jsonl")))
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "train.jsonl")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "valid.jsonl")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "test.jsonl")), "test")

    def get_examples_from_file(self, input_file):
        """Build examples from an arbitrary jsonl file (used for prediction).

        Bug fix: "to-pred" was previously passed to _read_jsonl (as its
        `quotechar` parameter) instead of to _create_examples, so this method
        raised a TypeError (missing `set_type`) whenever it was called.
        """
        return self._create_examples(
            self._read_jsonl(input_file), "to-pred")

    def get_labels(self):
        """See base class."""
        return ["0", "1", "2"]

    def _create_examples(self, records, set_type):
        """Creates examples for the training and dev sets.

        Each record yields one three-option example: text_a holds
        beginning + option_i per option and text_b repeats the story ending.
        'AnswerRightEnding' is 1-based in the data; labels are 0-based.
        """
        examples = []
        for (i, record) in enumerate(records):
            # NOTE(review): assumes every record carries an 'ending' field
            # used only for the guid — confirm against the data files.
            guid = "%s-%s-%s" % (set_type, record['InputStoryid'], record['ending'])
            beginning = record['InputSentence1']
            ending = record['InputSentence5']
            option1 = record['RandomMiddleSentenceQuiz1']
            option2 = record['RandomMiddleSentenceQuiz2']
            option3 = record['RandomMiddleSentenceQuiz3']
            answer = int(record['AnswerRightEnding']) - 1
            option1_context = convert_to_unicode(' '.join([beginning, option1]))
            option2_context = convert_to_unicode(' '.join([beginning, option2]))
            option3_context = convert_to_unicode(' '.join([beginning, option3]))
            label = convert_to_unicode(str(answer))
            text_a = [option1_context, option2_context, option3_context]
            text_b = [ending, ending, ending]
            examples.append(
                InputExampleWithList(guid=guid,
                                     text_a=text_a,
                                     text_b=text_b,
                                     label=label
                                     )
            )
        return examples

    def label_field(self):
        """Name of the jsonl field holding the gold answer (1-based)."""
        return "AnswerRightEnding"
class AnliWithCSKProcessor(DataProcessor):
    """Processor for the ANLI data set augmented with commonsense knowledge (CSK)."""

    def __init__(self):
        # Populated by _create_examples from the number of
        # RandomMiddleSentenceQuiz* fields found in the data.
        self._labels = []

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.jsonl")))
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "train.jsonl")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "valid.jsonl")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "test.jsonl")), "test")

    def get_examples_from_file(self, input_file):
        """Build examples from an arbitrary jsonl file (used for prediction).

        Bug fix: "to-pred" was previously passed to _read_jsonl (as its
        `quotechar` parameter) instead of to _create_examples, so this method
        raised a TypeError (missing `set_type`) whenever it was called.
        """
        return self._create_examples(
            self._read_jsonl(input_file), "to-pred")

    def get_labels(self):
        """See base class.

        NOTE(review): returns a fixed 16-label space even though
        _create_examples derives self._labels from the data — confirm the
        data never carries more than 16 options.
        """
        return [str(idx) for idx in range(16)]

    def _create_examples(self, records, set_type):
        """Creates examples for the training and dev sets.

        Per option i: text_a is beginning + RandomMiddleSentenceQuiz{i} and
        text_b is the ending followed by " Because , " + CSK{i}.
        'AnswerRightEnding' is 1-based in the data; labels are 0-based.
        """
        examples = []
        # Count the RandomMiddleSentenceQuiz* fields to learn the option count.
        num_fields = len(
            [x for x in list(records[0].keys()) if x.startswith('RandomMiddleSentenceQuiz')])
        self._labels = [str(idx) for idx in range(1, num_fields + 1)]
        for (i, record) in enumerate(records):
            guid = "%s-%s-%s" % (set_type, record['InputStoryid'], record['ending'])
            beginning = record['InputSentence1']
            ending = record['InputSentence5']
            text_a = []
            text_b = []
            for idx in range(1, num_fields + 1):
                text_a.append(
                    beginning + " " + record["RandomMiddleSentenceQuiz" + str(idx)]
                )
                text_b.append(
                    ending + " Because , " + record['CSK' + str(idx)]
                )
            answer = int(record['AnswerRightEnding']) - 1
            label = convert_to_unicode(str(answer))
            examples.append(
                InputExampleWithListFourFields(guid=guid,
                                               text_a=text_a,
                                               text_b=text_b,
                                               text_c=None,
                                               text_d=None,
                                               label=label
                                               )
            )
        return examples

    def label_field(self):
        """Name of the jsonl field holding the gold answer (1-based)."""
        return "AnswerRightEnding"
class WSCProcessor(DataProcessor):
    """Processor for a Winograd-schema-style TSV data set: resolve a "_"
    placeholder to one of two candidate names (labels "1"/"2")."""
    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test.tsv")), "dev")
    def get_labels(self):
        """See base class."""
        return ["1", "2"]
    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets.

        Columns (0-based): 1 = sentence (with a "_" placeholder after the
        conjunction), 2 = conjunction, 3/4 = candidate names, 5 = label.
        """
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                # Skip the TSV header row.
                continue
            guid = "%s-%s" % (set_type, i)
            sentence = convert_to_unicode(line[1]).replace("\"", "")
            conj = convert_to_unicode(line[2])
            # Split at the end of the conjunction: everything up to and
            # including it is the shared context; the remainder contains the
            # "_" placeholder that each candidate name is substituted into.
            idx = sentence.index(conj)
            context = sentence[:idx + len(conj)]
            option_str = sentence[idx + len(conj):].strip()
            name1 = convert_to_unicode(line[3])
            name2 = convert_to_unicode(line[4])
            option1 = option_str.replace("_", name1)
            option2 = option_str.replace("_", name2)
            text_a = [context, context]
            text_b = [option1, option2]
            label = convert_to_unicode(line[5])
            examples.append(
                InputExampleWithList(
                    guid=guid,
                    text_a=text_a,
                    text_b=text_b,
                    label=label
                )
            )
        return examples
    def get_examples_from_file(self, input_file):
        # Build examples from an arbitrary TSV file (used for prediction).
        return self._create_examples(
            self._read_tsv(input_file), "to-pred")
    def label_field(self):
        # Name of the column holding the gold answer.
        return "answer"
class MrpcProcessor(DataProcessor):
    """Processor for the MRPC data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        train_path = os.path.join(data_dir, "train.tsv")
        logger.info("LOOKING AT {}".format(train_path))
        return self._create_examples(self._read_tsv(train_path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        dev_path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(dev_path), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Create sentence-pair examples; row 0 is the header and is skipped."""
        return [
            InputExample(guid="%s-%s" % (set_type, i),
                         text_a=convert_to_unicode(line[3]),
                         text_b=convert_to_unicode(line[4]),
                         label=convert_to_unicode(line[0]))
            for i, line in enumerate(lines)
            if i != 0
        ]
class MnliProcessor(DataProcessor):
    """Processor for the MultiNLI data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        train_path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(train_path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        dev_path = os.path.join(data_dir, "dev_matched.tsv")
        return self._create_examples(self._read_tsv(dev_path), "dev_matched")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Create premise/hypothesis examples; row 0 is the header and is skipped."""
        return [
            InputExample(
                guid="%s-%s" % (set_type, convert_to_unicode(line[0])),
                text_a=convert_to_unicode(line[8]),
                text_b=convert_to_unicode(line[9]),
                label=convert_to_unicode(line[-1]))
            for i, line in enumerate(lines)
            if i != 0
        ]
class ColaProcessor(DataProcessor):
    """Processor for the CoLA data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        train_path = os.path.join(data_dir, "train.tsv")
        return self._create_examples(self._read_tsv(train_path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        dev_path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(dev_path), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Create single-sentence examples (CoLA files have no header row)."""
        return [
            InputExample(guid="%s-%s" % (set_type, i),
                         text_a=convert_to_unicode(line[3]),
                         text_b=None,
                         label=convert_to_unicode(line[1]))
            for i, line in enumerate(lines)
        ]
class BinaryAnli(DataProcessor):
    """Processor for a binarized version of the ANLI data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "train-binary.jsonl")
        return self._create_examples(self._read_jsonl(path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        path = os.path.join(data_dir, "valid-binary.jsonl")
        return self._create_examples(self._read_jsonl(path), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, records, set_type):
        """Create one sentence-pair example per record: the story beginning
        versus (candidate middle + ending)."""
        examples = []
        for i, record in enumerate(records):
            text_a = convert_to_unicode(record['InputSentence1'])
            text_b = convert_to_unicode(
                record['RandomMiddleSentenceQuiz1'] + " " + record['InputSentence5'])
            examples.append(
                InputExample(guid="%s-%s" % (set_type, i),
                             text_a=text_a,
                             text_b=text_b,
                             label=str(record['AnswerRightEnding'])))
        return examples
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Loads a data file into a list of `InputBatch`s.

    Args:
        examples: list of InputExample (single text_a, optional text_b).
        label_list: all label strings; a label's position becomes its label_id.
        max_seq_length: fixed length every feature is padded/truncated to.
        tokenizer: WordPiece tokenizer providing tokenize() and
            convert_tokens_to_ids().
    Returns:
        list of InputFeatures, one per example.
    """
    # Map each label string to its integer index in label_list.
    label_map = {}
    for (i, label) in enumerate(label_list):
        label_map[label] = i
    features = []
    for (ex_index, example) in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
        if tokens_b:
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[0:(max_seq_length - 2)]
        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0   0  0    0    0     0       0 0    1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0   0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambigiously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = []
        segment_ids = []
        tokens.append("[CLS]")
        segment_ids.append(0)
        for token in tokens_a:
            tokens.append(token)
            segment_ids.append(0)
        tokens.append("[SEP]")
        segment_ids.append(0)
        if tokens_b:
            for token in tokens_b:
                tokens.append(token)
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)
        # Zero-pad up to the sequence length.
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        label_id = label_map[example.label]
        if ex_index < 5:
            # Log the first few converted examples for manual inspection.
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("tokens: %s" % " ".join(
                [printable_text(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info(
                "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            logger.info("label: %s (id = %d)" % (example.label, label_id))
        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_id=label_id))
    return features
def convert_examples_to_features_mc(examples, label_list, max_seq_length, tokenizer):
    """Loads a data file into a list of `InputBatch`s.

    Multiple-choice variant: each example carries parallel lists
    (text_a/text_b, optionally text_c/text_d), one entry per answer option.
    Every option is encoded as [CLS] field0 [SEP] field1 [SEP] ... with the
    segment id equal to the field index, and the resulting per-option lists
    are stacked into one InputFeatures per example.
    """
    # Map each label string to its integer index in label_list.
    label_map = {}
    for (i, label) in enumerate(label_list):
        label_map[label] = i
    features = []
    for (ex_index, example) in tqdm(enumerate(examples), desc="Converting examples"):
        # `inputs` collects one tokenized list per populated text field.
        inputs = []
        tokens_a = [tokenizer.tokenize(t) for t in example.text_a]
        inputs.append(tokens_a)
        tokens_b = None
        if example.text_b:
            tokens_b = [tokenizer.tokenize(t) for t in example.text_b]
            inputs.append(tokens_b)
        tokens_c = None
        if example.text_c:
            tokens_c = [tokenizer.tokenize(t) for t in example.text_c]
            inputs.append(tokens_c)
        tokens_d = None
        if example.text_d:
            tokens_d = [tokenizer.tokenize(t) for t in example.text_d]
            inputs.append(tokens_d)
        if len(inputs) > 1:
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            # ([CLS] plus one [SEP] per field => len(inputs) + 1 specials).
            adjusted_len = max_seq_length - len(inputs) - 1
            # NOTE(review): _truncate_sequences only truncates the first two
            # token lists; with text_c/text_d present the total can still
            # exceed max_seq_length — confirm.
            _truncate_sequences(adjusted_len, inputs)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            for idx, ta in enumerate(tokens_a):
                tokens_a[idx] = tokens_a[idx][0:(max_seq_length - 2)]
        all_tokens = []
        all_token_ids = []
        all_segments = []
        all_masks = []
        # zip(*inputs) walks the fields option by option.
        for zipped_tokens in zip(*inputs):
            tokens = []
            segment_ids = []
            tokens.append("[CLS]")
            segment_ids.append(0)
            for idx, field in enumerate(zipped_tokens):
                for token in field:
                    tokens.append(token)
                    segment_ids.append(idx)
                tokens.append("[SEP]")
                segment_ids.append(idx)
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)
            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
            # NOTE(review): leftover debug print; the assert below reports the
            # same over-length condition.
            if len(input_ids) != max_seq_length:
                print("FOUND")
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            all_tokens.append(tokens)
            all_token_ids.append(input_ids)
            all_segments.append(segment_ids)
            all_masks.append(input_mask)
        label_id = label_map[example.label]
        if ex_index < 5:
            # Log the first few converted examples for manual inspection.
            logger.info("\n\n")
            logger.info("*** Example {} ***\n".format(ex_index))
            logger.info("guid: %s" % (example.guid))
            _ts = all_tokens
            _ids = all_token_ids
            _masks = all_masks
            _segs = all_segments
            logger.info("\n")
            for idx, (_t, _id, _mask, _seg) in enumerate(zip(_ts, _ids, _masks, _segs)):
                logger.info("\tOption {}".format(idx))
                logger.info("\ttokens: %s" % " ".join(
                    [printable_text(x) for x in _t]))
                logger.info("\tinput_ids: %s" % " ".join([str(x) for x in _id]))
                logger.info("\tinput_mask: %s" % " ".join([str(x) for x in _mask]))
                logger.info(
                    "\tsegment_ids: %s" % " ".join([str(x) for x in _seg]))
            logger.info("label: %s (id = %d)" % (example.label, label_id))
        features.append(
            InputFeatures(input_ids=all_token_ids,
                          input_mask=all_masks,
                          segment_ids=all_segments,
                          label_id=label_id))
    return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def _truncate_sequences(max_length, inputs):
    """Truncate each (tokens_a, tokens_b) pair in `inputs` in place.

    Args:
        max_length: combined token budget for each pair.
        inputs: list of per-field token-list collections; pairs are formed
            from inputs[0] and inputs[1].

    Removed a dead `idx = 0` local that was never used.
    NOTE(review): only the first two field lists are truncated; any
    inputs[2:] (text_c/text_d) are left untouched, so totals including them
    may still exceed the caller's budget — confirm intent.
    """
    for tokens_a, tokens_b in zip(inputs[0], inputs[1]):
        _truncate_seq_pair(tokens_a, tokens_b, max_length)
def accuracy(out, labels):
    """Return the number of rows of `out` whose argmax equals `labels`.

    Despite the name this is a correct-prediction count, not a ratio.
    """
    predicted = np.argmax(out, axis=1)
    return np.sum(predicted == labels)
def copy_optimizer_params_to_model(named_params_model, named_params_optimizer):
    """ Utility function for optimize_on_cpu and 16-bits training.
        Copy the parameters optimized on CPU/RAM back to the model on GPU
    """
    # Relies on both lists enumerating parameters in the same order; a name
    # mismatch means the pairing is broken, so fail loudly.
    for (name_opti, param_opti), (name_model, param_model) in zip(named_params_optimizer,
                                                                  named_params_model):
        if name_opti != name_model:
            logger.error("name_opti != name_model: {} {}".format(name_opti, name_model))
            raise ValueError
        # In-place copy keeps the model tensor's device/dtype unchanged.
        param_model.data.copy_(param_opti.data)
def set_optimizer_params_grad(named_params_optimizer, named_params_model, test_nan=False):
    """ Utility function for optimize_on_cpu and 16-bits training.
        Copy the gradient of the GPU parameters to the CPU/RAMM copy of the model

    Returns True if any model gradient contained a NaN (only checked when
    test_nan is set), so callers can skip the optimizer step.
    """
    is_nan = False
    # Relies on both lists enumerating parameters in the same order.
    for (name_opti, param_opti), (name_model, param_model) in zip(named_params_optimizer,
                                                                  named_params_model):
        if name_opti != name_model:
            logger.error("name_opti != name_model: {} {}".format(name_opti, name_model))
            raise ValueError
        if param_model.grad is not None:
            if test_nan and torch.isnan(param_model.grad).sum() > 0:
                is_nan = True
            if param_opti.grad is None:
                # Lazily allocate a gradient buffer matching the parameter's shape.
                param_opti.grad = torch.nn.Parameter(
                    param_opti.data.new().resize_(*param_opti.data.size()))
            param_opti.grad.data.copy_(param_model.grad.data)
        else:
            # No gradient on the model side: clear the optimizer copy too.
            param_opti.grad = None
    return is_nan
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints will be written.")
## Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
default=False,
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
default=False,
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_predict",
default=False,
action='store_true',
help="Whether to run prediction on a given dataset.")
parser.add_argument("--input_file_for_pred",
default=None,
type=str,
help="File to run prediction on.")
parser.add_argument("--output_file_for_pred",
default=None,
type=str,
help="File to output predictions into.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumualte before performing a backward/update pass.")
parser.add_argument('--optimize_on_cpu',
default=False,
action='store_true',
help="Whether to perform optimization and keep the optimizer averages on CPU")
parser.add_argument('--fp16',
default=False,
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=128,
help='Loss scaling, positive power of 2 values can improve fp16 convergence.')
args = parser.parse_args()
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"anli": AnliProcessor,
"anli3": AnliProcessor3Option,
'anli_csk': AnliWithCSKProcessor,
'bin_anli': BinaryAnli,
'wsc': WSCProcessor
}
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
if args.fp16:
logger.info("16-bits training currently not supported in distributed training")
args.fp16 = False # (see https://github.com/pytorch/pytorch/pull/13496)
logger.info("device %s n_gpu %d distributed training %r", device, n_gpu,
bool(args.local_rank != -1))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError(
"Output directory ({}) already exists and is not empty.".format(args.output_dir))
os.makedirs(args.output_dir, exist_ok=True)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = BertTokenizer.from_pretrained(args.bert_model)
train_examples = None
num_train_steps = None
if args.do_train:
train_examples = processor.get_train_examples(args.data_dir)
num_train_steps = int(
len(
train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
# Prepare model
if task_name == 'bin_anli':
model = BertForSequenceClassification.from_pretrained(args.bert_model, len(label_list))
else:
model = BertForMultipleChoice.from_pretrained(args.bert_model,
len(label_list),
len(label_list)
)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
if args.fp16:
param_optimizer = [(n, param.clone().detach().to('cpu').float().requires_grad_()) \
for n, param in model.named_parameters()]
elif args.optimize_on_cpu:
param_optimizer = [(n, param.clone().detach().to('cpu').requires_grad_()) \
for n, param in model.named_parameters()]
else:
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if n not in no_decay], 'weight_decay_rate': 0.01},
{'params': [p for n, p in param_optimizer if n in no_decay], 'weight_decay_rate': 0.0}
]
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_steps)
global_step = 0
model_save_path = os.path.join(args.output_dir, "bert-finetuned.model")
tr_loss = None
if args.do_train:
if task_name.lower().startswith("anli") or task_name.lower().startswith("wsc"):
train_features = convert_examples_to_features_mc(
train_examples, label_list, args.max_seq_length, tokenizer)
else:
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_steps)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler,
batch_size=args.train_batch_size)
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
status_tqdm = tqdm(train_dataloader, desc="Iteration")
for step, batch in enumerate(status_tqdm):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
loss, _ = model(input_ids, segment_ids, input_mask, label_ids)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.fp16 and args.loss_scale != 1.0:
# rescale loss for fp16 training
# see https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html
loss = loss * args.loss_scale
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16 or args.optimize_on_cpu:
if args.fp16 and args.loss_scale != 1.0:
# scale down gradients for fp16 training
for param in model.parameters():
param.grad.data = param.grad.data / args.loss_scale
is_nan = set_optimizer_params_grad(param_optimizer,
model.named_parameters(), test_nan=True)
if is_nan:
logger.info("FP16 TRAINING: Nan in gradients, reducing loss scaling")
args.loss_scale = args.loss_scale / 2
model.zero_grad()
continue
optimizer.step()
copy_optimizer_params_to_model(model.named_parameters(), param_optimizer)
else:
optimizer.step()
model.zero_grad()
global_step += 1
status_tqdm.set_description_str("Iteration / Training Loss: {}".format((tr_loss /
nb_tr_examples)))
torch.save(model, model_save_path)
if args.do_eval:
if args.do_predict and args.input_file_for_pred is not None:
eval_examples = processor.get_examples_from_file(args.input_file_for_pred)
else:
eval_examples = processor.get_dev_examples(args.data_dir)
if task_name.lower().startswith("anli") or task_name.lower().startswith("wsc"):
eval_features = convert_examples_to_features_mc(
eval_examples, label_list, args.max_seq_length, tokenizer)
else:
eval_features = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
if args.local_rank == -1:
eval_sampler = SequentialSampler(eval_data)
else:
eval_sampler = DistributedSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler,
batch_size=args.eval_batch_size)
logger.info("***** Loading model from: {} *****".format(model_save_path))
model = torch.load(model_save_path)
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
eval_predictions = []
eval_pred_probs = []
logger.info("***** Predicting ... *****".format(model_save_path))
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
tmp_eval_loss, logits = model(input_ids, segment_ids, input_mask, label_ids)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
tmp_eval_accuracy = accuracy(logits, label_ids)
eval_predictions.extend(np.argmax(logits, axis=1).tolist())
eval_pred_probs.extend([_compute_softmax(list(l)) for l in logits])
eval_loss += tmp_eval_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
eval_accuracy = eval_accuracy / nb_eval_examples
result = {'eval_loss': eval_loss,
'eval_accuracy': eval_accuracy,
'global_step': global_step,
'loss': tr_loss / nb_tr_steps if tr_loss is not None else 0.0
}
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if task_name == "wsc":
pred_examples = list(TsvIO.read(args.input_file_for_pred))
else:
pred_examples = read_jsonl_lines(args.input_file_for_pred)
logger.info("***** Eval predictions *****")
for record, pred, probs in zip(pred_examples, eval_predictions, eval_pred_probs):
record['bert_prediction'] = pred
record['bert_correct'] = pred == (int(record[processor.label_field()]) - 1)
record['bert_pred_probs'] = probs
write_items([json.dumps(r) for r in pred_examples], args.output_file_for_pred)
if __name__ == "__main__":
    # Script entry point: parse CLI args and run training and/or evaluation.
    main()
| Python | 1,193 | 39.125732 | 117 | /examples/run_classifier.py | 0.552663 | 0.547253 |
rlebras/pytorch-pretrained-BERT | refs/heads/master | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import gzip
import csv
import os
import logging
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Optional, Tuple, Union, IO, Callable, Set, List
from hashlib import sha256
from functools import wraps
from tqdm import tqdm
import boto3
from botocore.exceptions import ClientError
import requests
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# Default on-disk cache location for downloaded models/vocabularies;
# override with the PYTORCH_PRETRAINED_BERT_CACHE environment variable.
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
                                               Path.home() / '.pytorch_pretrained_bert'))
def url_to_filename(url: str, etag: str = None) -> str:
    """
    Map `url` to a deterministic, filesystem-safe cache filename.

    The name is the SHA-256 hex digest of the URL; when a (truthy) `etag`
    is supplied, a period plus the digest of the etag is appended so that
    different upstream versions of the same resource get distinct entries.
    """
    name = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        name = '{}.{}'.format(name, sha256(etag.encode('utf-8')).hexdigest())
    return name
def filename_to_url(filename: str, cache_dir: str = None) -> Tuple[str, str]:
    """
    Recover the original url and etag (etag may be ``None``) for a cached file.

    Reads the ``<filename>.json`` metadata sidecar written at download time.
    Raises ``FileNotFoundError`` when either the cached file or its metadata
    sidecar is missing.
    """
    cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if cache_dir is None else cache_dir
    cache_path = os.path.join(cache_dir, filename)
    meta_path = cache_path + '.json'
    for required in (cache_path, meta_path):
        if not os.path.exists(required):
            raise FileNotFoundError("file {} not found".format(required))
    with open(meta_path) as meta_file:
        metadata = json.load(meta_file)
    return metadata['url'], metadata['etag']
def cached_path(url_or_filename: Union[str, Path], cache_dir: str = None) -> str:
    """
    Resolve `url_or_filename` to a local file path.

    http/https/s3 URLs are served from the cache (downloading on a miss) and
    the cached path is returned; an existing local path is returned as-is.
    Raises ``FileNotFoundError`` for a missing local path and ``ValueError``
    for anything that is neither a known URL scheme nor a local path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    path_str = str(url_or_filename) if isinstance(url_or_filename, Path) else url_or_filename
    scheme = urlparse(path_str).scheme
    if scheme in ('http', 'https', 's3'):
        # Remote resource: hand off to the cache layer.
        return get_from_cache(path_str, cache_dir)
    if os.path.exists(path_str):
        # Local file that exists.
        return path_str
    if scheme == '':
        # Looks like a local path but does not exist.
        raise FileNotFoundError("file {} not found".format(path_str))
    # Unknown scheme.
    raise ValueError("unable to parse {} as a URL or as a local path".format(path_str))
def split_s3_path(url: str) -> Tuple[str, str]:
    """Split an ``s3://bucket/key`` url into ``(bucket, key)``.

    Raises ``ValueError`` when either the bucket or the key is missing.
    """
    parsed = urlparse(url)
    if not (parsed.netloc and parsed.path):
        raise ValueError("bad s3 path {}".format(url))
    key = parsed.path
    # Drop the single leading '/' that urlparse leaves on the key.
    if key.startswith("/"):
        key = key[1:]
    return parsed.netloc, key
def s3_request(func: Callable):
    """
    Decorator for S3 helpers: converts a 404 ``ClientError`` into a
    ``FileNotFoundError`` carrying the offending url; any other S3 error
    propagates unchanged.
    """
    @wraps(func)
    def wrapper(url: str, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            if int(exc.response["Error"]["Code"]) != 404:
                raise
            raise FileNotFoundError("file {} not found".format(url))
    return wrapper
@s3_request
def s3_etag(url: str) -> Optional[str]:
    """Check ETag on S3 object."""
    # Uses boto3's default credential chain; s3_request maps 404s to
    # FileNotFoundError.
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    s3_object = s3_resource.Object(bucket_name, s3_path)
    return s3_object.e_tag
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
    """Pull a file directly from S3."""
    # Streams the object body straight into the open file handle.
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url: str, temp_file: IO) -> None:
    """Stream `url` into `temp_file` in 1 KiB chunks with a tqdm progress bar."""
    req = requests.get(url, stream=True)
    content_length = req.headers.get('Content-Length')
    # Content-Length may be absent (e.g. chunked responses); tqdm then
    # shows progress without a total.
    total = int(content_length) if content_length is not None else None
    progress = tqdm(unit="B", total=total)
    for chunk in req.iter_content(chunk_size=1024):
        if chunk: # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url: str, cache_dir: str = None) -> str:
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    The cache key is the sha256 of the URL plus (when available) its ETag,
    so a changed upstream file becomes a cache miss.  A `<file>.json`
    sidecar is written next to each entry so filename_to_url() can invert
    the hash later.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    os.makedirs(cache_dir, exist_ok=True)
    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        response = requests.head(url, allow_redirects=True)
        if response.status_code != 200:
            raise IOError("HEAD request failed for url {} with status code {}"
                          .format(url, response.status_code))
        etag = response.headers.get("ETag")
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)
            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)
            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info("creating metadata file for %s", cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w') as meta_file:
                json.dump(meta, meta_file)
            logger.info("removing temp file %s", temp_file.name)
    return cache_path
def read_set_from_file(filename: str) -> Set[str]:
    '''
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line; trailing whitespace is
    stripped from each item.
    '''
    with open(filename, 'r') as handle:
        return {line.rstrip() for line in handle}
def get_file_extension(path: str, dot=True, lower: bool = True):
    """Return *path*'s extension, optionally without the dot and lowercased."""
    _, ext = os.path.splitext(path)
    if not dot:
        ext = ext[1:]
    return ext.lower() if lower else ext
def read_jsonl_lines(input_file: str) -> List[dict]:
    """Parse a JSON-lines file into a list of objects, one per line."""
    with open(input_file) as handle:
        return [json.loads(line.strip()) for line in handle]
def write_items(items: List[str], output_file):
    """Write each string in *items* to *output_file*, one per line.

    Fix: the previous version called ``f.close()`` inside the ``with``
    block, which is redundant — the context manager already closes the
    handle on exit.
    """
    with open(output_file, 'w') as handle:
        for item in items:
            handle.write(item + "\n")
class TsvIO(object):
    """Reader/writer for (optionally gzipped) TSV files with a header schema."""

    @staticmethod
    def read(filename, known_schema=None, sep="\t", gzipped=False, source=None):
        """
        Lazily read a TSV file, yielding one dict per data row.

        :param filename: TSV formatted file
        :param known_schema: column names; when ``None``, the first line of
            the file is parsed as the schema
        :param sep: Separator used in the file. Default is '\t`
        :param gzipped: set True to read through ``gzip.open``
        :param source: value stored under each record's ``'source'`` key
            (defaults to the filename)
        :return: generator of dicts keyed by the schema's column names, with
            bookkeeping keys ``'source'`` and ``'line_num'`` added.
        """
        opener = gzip.open if gzipped else open
        line_num = 0
        # Fix: no explicit f.close() — the with-block already closes the
        # handle (the old close call inside the block was redundant).
        with opener(filename, 'rt') as f:
            for line in f:
                if known_schema is None:
                    # First line doubles as the schema when none was given.
                    known_schema = [s.strip() for s in line.split(sep)]
                else:
                    line_num += 1
                    data_fields = line.split(sep)
                    data = {k.strip(): v.strip() for k, v in zip(known_schema, data_fields)}
                    data['source'] = filename if source is None else source
                    data['line_num'] = line_num
                    yield data

    @staticmethod
    def make_str(item, sub_sep="\t"):
        """Render *item* as a string; lists are joined with *sub_sep*.

        Fix: *sub_sep* is now propagated into recursive calls — previously
        nested lists silently fell back to the default tab separator.
        """
        if isinstance(item, list):
            return sub_sep.join(TsvIO.make_str(i, sub_sep=sub_sep) for i in item)
        return str(item)

    @staticmethod
    def write(records: List[dict], filename, schema=None, sep='\t', append=False, sub_sep=';'):
        """
        Write a TSV/CSV formatted file with the provided schema.

        :param records: List of records (dict-like) to be written to the file
        :param filename: Output filename
        :param schema: Order in which fields from each record will be written
        :param sep: Separator used in the file. Default is '\t`
        :param append: Whether to use append mode or write a new file
        :param sub_sep: If a field contains a list of items, this separator
            is used to join the values in the list
        :return:
        """
        mode = 'a' if append else 'w'
        if sep == "\t":
            with open(filename, mode) as f:
                # Header only for fresh files with a known schema.
                if schema is not None and not append:
                    f.write(sep.join(schema) + "\n")
                for record in records:
                    f.write(sep.join(TsvIO.make_str(record.__getitem__(field), sub_sep=sub_sep)
                                     for field in schema))
                    f.write('\n')
        elif sep == ",":
            with open(filename, mode) as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=schema)
                writer.writeheader()
                for record in records:
                    writer.writerow(record)
        # NOTE: any other separator is silently ignored (pre-existing
        # behavior, preserved).
rlebras/pytorch-pretrained-BERT | refs/heads/master | from examples.run_classifier import AnliWithCSKProcessor, convert_examples_to_features_mc
from pytorch_pretrained_bert import BertTokenizer
dir = "../../abductive-nli/data/abductive_nli/one2one-correspondence/anli_with_csk/"
processor = AnliWithCSKProcessor()
examples = processor.get_train_examples(dir)
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
label_list = processor.get_labels()
max_seq_length = 128
features = convert_examples_to_features_mc(examples, label_list, max_seq_length, tokenizer)
print("OK") | Python | 17 | 30.529411 | 91 | /examples/test_data_processor.py | 0.790654 | 0.783178 |
MistyW/learngit | refs/heads/master | # _*_ coding: utf-8 _*_
# __author__ = wmm
class Settings():
    """Container for all of the Alien Invasion game's configuration values."""

    def __init__(self):
        """Initialize the game's static settings."""
        # Screen dimensions in pixels and the RGB background colour.
        self.screen_width = 1200
        self.screen_height = 800
        self.bg_color = (230, 230, 230)
AllenMkandla/oop_person | refs/heads/master | class Person:
pass
def __init__(self, name, age, gender, interests):
self.name = name
self.age = age
self.gender = gender
self.interests = interests
def hello(self):
interests_str = 'My interests are '
for pos in range(len(self.interests)):
if pos == len(self.interests) - 1:
interests_str += 'and ' + self.interests[pos] + '.'
else:
interests_str += self.interests[pos] + ', '
return 'Hello, my name is {} and I am {} years old. {}'.format(self.name, self.age, interests_str)
person = Person('Ryan', 30, "male",['being a hardarse', "agile", "ssd hard drives"]) | Python | 21 | 32.142857 | 106 | /oop.py | 0.546763 | 0.542446 |
tartaruz/Stein-saks-papir | refs/heads/master | import funk
from time import sleep
import os
clear = lambda: os.system('cls')
valg = 0
while (valg!="avslutt"):
sleep(1)
print()
funk.velkommen()
funk.meny()
print()
valg = funk.valg()
clear()
if valg=="1":
print("--------------Spiller 1's tur--------------")
pvalg=funk.choose()
p1=funk.konv(pvalg)
print("Takk! Nå er det spiller2's tur")
sleep(2)
clear()
print("--------------Spiller 2's tur--------------")
pvalg=funk.choose()
p2=funk.konv(pvalg)
funk.game(p1,p2,1)
time(5)
clear()
elif valg=="2":
print("--------------Spiller 1's tur--------------")
pvalg=funk.choose()
p=funk.konv(pvalg)
print("Du valgte",p,"! Nå er det maskinens tur")
sleep(3)
clear()
print("--------------Terminator's tur-------------")
com=funk.comp()
funk.revmaskinvalg(com)
cp=funk.konv(com)
print()
print("TERMINATOR VALGTE:",cp.upper())
funk.game(p,cp,2) #Type 2
sleep(5)
clear()
elif valg==3:
print("3")
elif valg=="help":
print("help")
c=funk.comp()
print(c)
else:
print("Wrong, try again ")
clear()
print("Farvel!")
time.sleep(10)
| Python | 55 | 23 | 60 | /stein-saks-papir.py | 0.468986 | 0.451589 |
tartaruz/Stein-saks-papir | refs/heads/master | import random
from time import sleep#for stein saks papir
def velkommen():
    # Print the framed welcome banner followed by a blank line.
    print("§-----------------------------------------------------------§")
    print("§-----|             VELKOMMEN TIL STEIN/SAKS/PAPIR!    |-----§")
    print("§-----------------------------------------------------------§")
    print()
def valg(): #velg
    # Read a menu command from the user and return it unchanged (string).
    valget=str(input("Kommando: "))
    return valget
def valgmeny():
    # Print the ASCII-art hand for each of the three moves, numbered 1-3.
    print("    _______")
    print("---'   ____)")
    print("      (_____)")
    print(" [1]  (_____)")
    print(" STEIN(____)")
    print("---.__(___)")
    print("    _______")
    print("---'   ____)____")
    print("          ______)")
    print(" [2]   __________)")
    print("  SAKS(____)")
    print("---.__(___)")
    print("    _______")
    print("---'   ____)____")
    print("          ______)")
    print(" [3]      _______)")
    print(" PAPIR   _______)")
    print("---.__________)")
def revmaskinvalg(hvilken):
    # Print the mirrored (computer-side) ASCII hand for the given move:
    # 1 = stein, 2 = saks, anything else = papir.
    if hvilken==1:
        print("     _______ ")
        print(" ( ____  '---")
        print("(_____) ")
        print("(_____) ")
        print("(____) ")
        print(" (___)__.---- ")
    elif hvilken==2:
        print("     _______ ")
        print(" ____(____  '----")
        print("(______ ")
        print("(__________ ")
        print("  (____) ")
        print(" (___)__.---")
    else:
        print("     _______ ")
        print(" ____(____  '---")
        print(" (______ ")
        print("(_______ ")
        print("(_______ ")
        print(" (__________.--- ")
def choose():
    # Show the three moves, then return the player's numeric pick (1-3).
    valgmeny()
    # NOTE(review): eval() on raw user input is unsafe; int() would suffice.
    valg=eval(input("Velg[1-2-3]: "))
    return valg
def meny():
    # Print the main menu options.
    print("1: 1vs1")
    print("2: 1vsCom")
    print("3: Help")
    print("4: Avslutt")
def comp():
    """Pick the computer's move: a uniformly random int in [1, 3]."""
    return random.randint(1, 3)
def konv(valg):
    """Map a menu number to its move name: 1 -> stein, 2 -> saks, else papir."""
    return {1: "stein", 2: "saks"}.get(valg, "papir")
def game(valg1, valg2, Gtype):  # Gtype: 1 = two humans, otherwise vs computer
    """Print the outcome of one stein/saks/papir round from player 1's view."""
    spiller = "spiller 2" if Gtype == 1 else "maskinen"
    # Which move each move defeats; an unknown valg1 falls back to the
    # papir branch (beats stein), matching the original else-chain.
    beats = {"stein": "saks", "saks": "papir", "papir": "stein"}
    if valg1 == valg2:
        print("DRAW! Ingen vinnere!")
    elif beats.get(valg1, "stein") == valg2:
        print("Spiller 1 vant mot", spiller)
    else:
        print("Spiller 1 tapte mot", spiller)
| Python | 112 | 23.258928 | 74 | /funk.py | 0.361499 | 0.348273 |
guanjz20/MM21_FME_solution | refs/heads/master | SAMM_ROOT = '/data/gjz_mm21/SAMM'
CASME_2_LABEL_DIR = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped/labels'
# kernel path
GAUSS_KERNEL_PATH = {
'sm_kernel': '/home/gjz/lry_kernels/gauss2D-smooth.npy',
'dr1_kernel': '/home/gjz/lry_kernels/gauss1D-derivative1.npy',
'dr2_kernel': '/home/gjz/lry_kernels/gauss1D-derivative2.npy'
} | Python | 9 | 40 | 101 | /dataset/params.py | 0.722826 | 0.684783 |
guanjz20/MM21_FME_solution | refs/heads/master | import os
import sys
import cv2
from timm.utils import reduce_tensor
import torch
import shutil
import numpy as np
import os.path as osp
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch.distributed as dist
from torch.nn.modules import loss
from datetime import datetime
import paths
import dataset.utils as dataset_utils
sys.setrecursionlimit(10000)
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        # val = latest sample, sum = weighted total, count = total weight.
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record `val` observed with weight `n` and refresh the mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
class Focal_Loss(torch.nn.Module):
    """Multi-class focal loss with optional per-class alpha weighting.

    loss_i = alpha[y_i] * (1 - p_i)^gamma * -log(p_i + epsilon), averaged
    over the batch, where p_i is the softmax probability of the true class.
    """

    def __init__(self, alpha=[], gamma=2, num_class=2, epsilon=1e-7):
        super().__init__()
        # Mutable default kept for interface compatibility; an empty list
        # means uniform (all-ones) class weights.
        if not alpha:
            self.alpha = torch.ones(num_class)
        else:
            self.alpha = torch.tensor(alpha, dtype=torch.float32)
        self.gamma = gamma
        self.epsilon = epsilon

    def forward(self, pred, target):
        assert len(pred.shape) == 2, 'pred shape should be N, num_class'
        assert len(target.shape) == 1, 'target shape should be N'
        probs = torch.softmax(pred, dim=-1)
        # Probability assigned to each sample's ground-truth class.
        pt = probs.gather(1, target.unsqueeze(1)).squeeze(1)
        log_pt = torch.log(pt + self.epsilon)
        alpha_t = self.alpha.to(pred.device)[target]
        focal_weight = alpha_t * (1 - pt) ** self.gamma
        return -(focal_weight * log_pt).mean()
class My_loss(torch.nn.Module):
    """Scaled L1 loss on the annotation channel of the network output.

    Channel 0 of `out` is the per-frame annotation score; the remaining
    channels are class logits, which this loss ignores (earlier CCC / MSE /
    cross-entropy / focal variants were tried and abandoned).
    """

    def __init__(self):
        super().__init__()
        # Kept for interface/checkpoint compatibility even though the
        # focal term is not part of the final loss value.
        self.focal_loss = Focal_Loss(num_class=3)

    def forward(self, out, anno_y, label_y):
        anno_x = out[..., 0]
        if anno_x.dim() == 2:
            anno_x = anno_x.reshape(-1)
            anno_y = anno_y.reshape(-1)
        # label_y is intentionally unused; only the regression term remains.
        return F.l1_loss(anno_x, anno_y) * 1000
def ccc(y_pred, y_true, epsilon=1e-7):
    """Concordance correlation coefficient (and Pearson r) of 1-D tensors."""
    assert len(y_pred.shape) == 1
    mean_t, mean_p = y_true.mean(), y_pred.mean()
    dev_t, dev_p = y_true - mean_t, y_pred - mean_p
    # Pearson correlation, epsilon-stabilized against zero variance.
    rho = (dev_p * dev_t).sum() / (
        torch.sqrt((dev_p ** 2).sum()) * torch.sqrt((dev_t ** 2).sum()) + epsilon)
    std_p, std_t = torch.std(y_pred), torch.std(y_true)
    concordance = 2 * rho * std_t * std_p / (
        std_p ** 2 + std_t ** 2 + (mean_p - mean_t) ** 2 + epsilon)
    return concordance, rho
def img_dirs_filter(img_dirs, dataset):
    '''
    Keep only clip directories that have an entry in the dataset's
    annotation dict — some clips are not labeled (or labeled incorrectly)
    and must be dropped before training/evaluation.
    '''
    _img_dirs = []
    if dataset == 'SAMM':
        # NOTE(review): SAMM loads anno_dict from paths.SAMM_LABEL_DIR here
        # while evaluate() loads from paths.SAMM_ROOT — confirm both point
        # at the same annotations.
        anno_dict = np.load(osp.join(paths.SAMM_LABEL_DIR, 'anno_dict.npy'),
                            allow_pickle=True).item()
    elif dataset == 'CASME_2':
        anno_dict = np.load(osp.join(paths.CASME_2_LABEL_DIR, 'anno_dict.npy'),
                            allow_pickle=True).item()
    else:
        raise NotImplementedError
    for img_dir in img_dirs:
        if img_dir in anno_dict:
            _img_dirs.append(img_dir)
        else:
            print('clip: {} is not labeled or labeled incorrectly.'.format(
                img_dir))
    return _img_dirs
def get_img_dirs(dataset):
    """Return all labeled clip directories for `dataset`.

    SAMM clips sit directly under SAMM_VIDEO_DIR; CASME_2 clips are nested
    one level deeper (subject/clip).  Unlabeled clips are removed via
    img_dirs_filter().
    """
    if dataset == 'SAMM':
        img_dirs = [
            osp.join(paths.SAMM_VIDEO_DIR, name)
            for name in os.listdir(paths.SAMM_VIDEO_DIR)
        ]
    elif dataset == 'CASME_2':
        # Two-level layout: one list of clip dirs per subject, then flatten.
        _img_dirs = [[
            osp.join(paths.CASME_2_VIDEO_DIR, name1, name2)
            for name2 in os.listdir(osp.join(paths.CASME_2_VIDEO_DIR, name1))
        ] for name1 in os.listdir(paths.CASME_2_VIDEO_DIR)]
        img_dirs = []
        for dirs in _img_dirs:
            img_dirs.extend(dirs)
    else:
        raise NotImplementedError
    img_dirs = img_dirs_filter(img_dirs, dataset)
    return img_dirs
def leave_one_out(img_dirs, dataset):
    """
    Build leave-one-subject-out splits.

    Returns {subject_id: [train_dirs, val_dirs]}.  The subject id is parsed
    from the path: SAMM uses the '<id>_*' basename prefix (e.g. '006'),
    CASME_2 uses the parent directory name (e.g. 's15').
    """
    img_dirs = sorted(img_dirs)
    splits = {}
    if dataset == 'SAMM':
        subjects = sorted({osp.basename(d).split('_')[0] for d in img_dirs})
        for subject in subjects:
            # NOTE: substring membership (not an exact id comparison)
            # reproduces the original assignment logic.
            val = [d for d in img_dirs if subject in d]
            train = [d for d in img_dirs if subject not in d]
            splits[subject] = [train, val]
    elif dataset == 'CASME_2':
        subjects = sorted({d.split('/')[-2] for d in img_dirs})
        for subject in subjects:
            val = [d for d in img_dirs if d.split('/')[-2] == subject]
            train = [d for d in img_dirs if d.split('/')[-2] != subject]
            splits[subject] = [train, val]
    else:
        raise NotImplementedError
    return splits
def adjust_learning_rate(optimizer, epoch, lr_strat, wd, lr_steps, factor=0.1):
    """Step-decay the LR by `factor` at each milestone in `lr_steps`.

    The LR is lr_strat * factor^k, where k is the number of milestones
    already passed; weight decay `wd` is (re)applied to every param group.
    """
    milestones_passed = sum(epoch >= np.asarray(lr_steps))
    lr = lr_strat * factor ** milestones_passed
    for group in optimizer.param_groups:
        group['lr'] = lr
        group['weight_decay'] = wd
def save_checkpoint(state, is_best, save_root, root_model, filename='val'):
    """Persist `state` to <save_root>/<root_model>/<filename>_checkpoint.pth.tar.

    When `is_best` is truthy the checkpoint is additionally duplicated as
    <filename>_best_loss.pth.tar in the same directory.
    """
    ckpt_path = '%s/%s/%s_checkpoint.pth.tar' % (save_root, root_model, filename)
    torch.save(state, ckpt_path)
    if is_best:
        best_path = '%s/%s/%s_best_loss.pth.tar' % (save_root, root_model, filename)
        shutil.copyfile(ckpt_path, best_path)
def check_rootfolders(args):
    """Create the per-run log/model/output/runs folders under args.save_root."""
    for sub in (args.root_log, args.root_model, args.root_output, args.root_runs):
        folder = "%s/" % (args.save_root) + sub
        if not os.path.exists(folder):
            print('creating folder ' + folder)
            os.makedirs(folder)
def evaluate(pred_anno_dict,
             pred_label_dict,
             dataset,
             threshold=0.9,
             val_id='all',
             epoch=-1,
             args=None):
    """Score predicted annotation curves against ground-truth spans.

    For each clip, candidate peaks are scanned every `fps` frames and grown
    symmetrically until the accumulated annotation mass reaches `threshold`;
    a grown span counts as a true positive when its IoU with a ground-truth
    [onset, offset] span is >= 0.5.  Also saves a per-clip plot of predicted
    vs ground-truth curves.  Returns (info strings, overall F1, (M, N, A))
    where M = #gt spans, N = #predicted spans, A = #matched spans.
    """
    if dataset == 'SAMM':
        pred_gt = np.load(osp.join(paths.SAMM_ROOT, 'pred_gt.npy'),
                          allow_pickle=True).item()
        anno_dict = np.load(osp.join(paths.SAMM_ROOT, 'anno_dict.npy'),
                            allow_pickle=True).item()
        fps = 200
    elif dataset == 'CASME_2':
        pred_gt = np.load(osp.join(paths.CASME_2_LABEL_DIR, 'pred_gt.npy'),
                          allow_pickle=True).item()
        anno_dict = np.load(osp.join(paths.CASME_2_LABEL_DIR, 'anno_dict.npy'),
                            allow_pickle=True).item()
        fps = 30
    else:
        raise NotImplementedError
    result_dict = {}
    for img_dir, pred_annos in pred_anno_dict.items():
        pred_labels = pred_label_dict[img_dir]
        gt_list = pred_gt[img_dir]
        pred_list = []
        # scan all possible peak point
        for peak_idx in range(0, len(pred_annos), fps):
            is_peak = True
            front = peak_idx
            tail = peak_idx
            # label_sum = pred_labels[peak_idx]
            cumsum = pred_annos[peak_idx]
            # Grow [front, tail] around the candidate peak until enough
            # annotation mass is accumulated or the clip ends; abort if a
            # neighbour exceeds the candidate (then it is not a peak).
            while is_peak and cumsum < threshold and (
                    front > 0 or tail < len(pred_annos) - 1):
                if front - 1 >= 0:
                    front -= 1
                    cumsum += pred_annos[front]
                    # label_sum += pred_labels[front]
                if tail + 1 < len(pred_annos):
                    tail += 1
                    cumsum += pred_annos[tail]
                    # label_sum += pred_labels[tail]
                is_peak = pred_annos[peak_idx] >= pred_annos[
                    front] and pred_annos[peak_idx] >= pred_annos[tail]
            if is_peak and cumsum >= threshold:
                # TODO: label func
                pred_list.append([front, tail, -1])
        M = len(gt_list)
        N = len(pred_list)
        A = 0
        for [onset, offset, label_gt] in gt_list:
            for [
                    front, tail, _
            ] in pred_list:  # TODO: if one pred could match more than one gt?
                # Order the two spans so b1 starts first, then compute IoU.
                if front < onset:
                    b1 = [front, tail]
                    b2 = [onset, offset]
                else:
                    b2 = [front, tail]
                    b1 = [onset, offset]
                # 1
                if b1[1] >= b2[0] and b2[1] >= b1[1]:
                    overlap = b1[1] - b2[0] + 1
                    union = b2[1] - b1[0] + 1
                elif b1[1] >= b2[1]:
                    overlap = b2[1] - b2[0] + 1
                    union = b1[1] - b1[0] + 1
                else:
                    # no overlap
                    overlap = 0
                    union = 1
                if overlap / union >= 0.5:
                    A += 1
                    break
        result_dict[img_dir] = [M, N, A]
    ret_info = []
    M = 0
    N = 0
    A = 0
    # Aggregate per-clip counts into dataset-level precision/recall/F1.
    for key, (m, n, a) in result_dict.items():
        # p = a / n
        # r = a / m
        # f = 2 * r * p / (p + r)
        # ret_info.append('[{}] P: {.4f}, R: {:.4f}, F1: {:.4f}'.format(
        #     key, p, r, f))
        M += m
        N += n
        A += a
    if M == 0 or N == 0 or A == 0:
        precision = -1.0
        recall = -1.0
        f_score = -1.0
    else:
        precision = A / N
        recall = A / M
        f_score = 2 * recall * precision / (recall + precision)
    ret_info.append('[over all] P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(
        precision, recall, f_score))
    # save fig
    # One subplot per clip: predicted curve (blue) vs ground truth (red).
    column = 3
    fig = plt.figure(figsize=(10,
                              ((len(pred_anno_dict) - 1) // column + 1) * 2))
    for i, (img_dir, pred_annos) in enumerate(pred_anno_dict.items()):
        fig.add_subplot((len(pred_anno_dict) - 1) // column + 1, column, i + 1)
        plt.plot(pred_annos, 'b-', alpha=0.5)
        plt.plot(anno_dict[img_dir], 'r-', alpha=0.5)
    fig.tight_layout()
    plt.savefig(
        osp.join(args.save_root, args.root_output,
                 '{}_anno_{}.pdf'.format(val_id, epoch)))
    plt.close('all')
    return ret_info, f_score, (M, N, A)
def evaluate_bi_labels(pred_and_gt, val_id, epoch, args):
    """Score per-frame binary predictions clip-by-clip.

    `pred_and_gt` maps an image path to [pred, target] for one frame; frames
    are regrouped into per-clip arrays (missing frames default to [0, 0]),
    scored with evaluate_pred_and_gt, and — on local rank 0 — plotted as
    pred (blue) vs target (red) curves.  Returns
    (precision, recall, f_score, (M, N, A), match_regions_record).
    """
    keys = sorted(list(pred_and_gt.keys()))
    imgs_dirs = sorted(list(set([osp.dirname(img_p) for img_p in keys])))
    result_dict = {}
    for imgs_dir in imgs_dirs:
        result_dict[imgs_dir] = []
        img_ps = dataset_utils.scan_jpg_from_img_dir(imgs_dir)
        for img_p in img_ps:
            result_dict[imgs_dir].append(pred_and_gt.get(
                img_p, [0, 0]))  # [pred, target]
        result_dict[imgs_dir] = np.asarray(result_dict[imgs_dir])
    precision, recall, f_score, MNA, result_dict, match_regions_record = evaluate_pred_and_gt(
        result_dict, args)
    # visulization
    if args.local_rank == 0:
        column = 3
        fig = plt.figure(figsize=(10,
                                  ((len(imgs_dirs) - 1) // column + 1) * 2))
        for i, imgs_dir in enumerate(imgs_dirs):
            fig.add_subplot((len(imgs_dirs) - 1) // column + 1, column, i + 1)
            data = result_dict[imgs_dir]
            pred = data[:, 0]
            target = data[:, 1]
            plt.plot(pred, 'b-', alpha=0.5)
            plt.plot(target, 'r-', alpha=0.5)  # gt
            plt.title(osp.basename(imgs_dir))
        fig.tight_layout()
        out_dir = osp.join(args.save_root, args.root_output, val_id)
        os.makedirs(out_dir, exist_ok=True)
        plt.savefig(osp.join(out_dir, 'bi_label_{}.pdf'.format(epoch)))
        plt.close('all')
    return precision, recall, f_score, MNA, match_regions_record
def extend_front(front, pred, patience):
    """Extend a positive region's end index forward through small gaps.

    Starting from an index where pred > 0, repeatedly jump to the furthest
    positive index within `patience` steps ahead; stop when no positive
    index exists in that window and return the final position.
    """
    assert pred[front] > 0
    pos = front
    while True:
        for step in range(patience, 0, -1):
            if pos + step < len(pred) and pred[pos + step] > 0:
                pos += step
                break
        else:
            # No positive frame within `patience` of pos: region ends here.
            return pos
def evaluate_pred_and_gt(result_dict, args):
    """Turn frame-level predictions into regions and match them against gt.

    Args:
        result_dict: dict imgs_dir -> ndarray of shape (num_frames, 2) with
            columns [pred, target].
        args: needs ``dataset`` ('SAMM' or 'CASME_2') and ``patience``.

    Returns:
        (precision, recall, f_score, (M, N, A), result_dict,
        match_regions_record) where M = #gt regions, N = #predicted regions,
        A = #true-positive matches.  result_dict is updated in place so its
        pred column becomes the binarised region mask.
    """
    # Ground-truth intervals per video: imgs_dir -> [[onset, offset, label], ...]
    if args.dataset == 'SAMM':
        # patience = 25
        pred_gt = np.load(osp.join(paths.SAMM_ROOT, 'pred_gt.npy'),
                          allow_pickle=True).item()
    elif args.dataset == 'CASME_2':
        pred_gt = np.load(osp.join(paths.CASME_2_LABEL_DIR, 'pred_gt.npy'),
                          allow_pickle=True).item()
        # patience = 10
    else:
        raise NotImplementedError
    M = 0
    N = 0
    A = 0
    match_regions_record = {}
    for imgs_dir, data in result_dict.items():
        pred = data[:, 0]
        target = data[:, 1]
        # Scan left-to-right, growing each positive run with `extend_front`
        # and then skipping `patience` frames before looking for the next run.
        found_regions = []
        match_regions = [
        ]  # gt_onset, gt_offset, pred_onset, pred_offset, TP/FP
        front = 0
        while front < len(pred):
            tail = front
            if pred[front] > 0:
                tail = extend_front(front, pred, args.patience)
                if front < tail:  # find one region
                    found_regions.append([front, tail])
            front = tail + args.patience
        # modify result_dict: replace raw preds with the binarised region mask
        pred = np.zeros_like(pred)
        for front, tail in found_regions:
            pred[front:tail] = 1
        data[:, 0] = pred
        result_dict[imgs_dir] = data
        # eval precision, recall, f_score
        gt_list = pred_gt[imgs_dir]
        m = len(gt_list)
        n = len(found_regions)
        a = 0
        # TODO: determine whether one predicted region is macro or micro-expression
        gt_regions_mark = np.zeros(m)
        found_regions_mark = np.zeros(n)
        for mg, [onset, offset, label_gt] in enumerate(gt_list):
            # label_gt: 1->macro, 2->micro
            for mf, [front, tail] in enumerate(
                    found_regions
            ):  # TODO: if one found region can match more than one gt region
                # Order the two intervals so b1 starts first; the IoU below
                # is computed in (inclusive) frame counts.
                if front < onset:
                    b1 = [front, tail]
                    b2 = [onset, offset]
                else:
                    b1 = [onset, offset]
                    b2 = [front, tail]
                # 1
                if b1[1] >= b2[0] and b2[1] >= b1[1]:
                    overlap = b1[1] - b2[0] + 1
                    union = b2[1] - b1[0] + 1
                elif b1[1] >= b2[1]:
                    overlap = b2[1] - b2[0] + 1
                    union = b1[1] - b1[0] + 1
                else:  # no overlap
                    overlap = 0
                    union = 1
                # An IoU of at least 0.5 counts as a true-positive match.
                if overlap / union >= 0.5:
                    a += 1
                    found_regions_mark[mf] = 1
                    gt_regions_mark[mg] = 1
                    match_regions.append([onset, offset, front, tail, 'TP'])
                    break
        # Unmatched gt regions are false negatives.
        for mg in range(m):
            if gt_regions_mark[mg] == 0:
                onset, offset, _ = gt_list[mg]
                match_regions.append([onset, offset, '-', '-', 'FN'])
        # Unmatched predicted regions are false positives.
        for mf in range(n):
            if found_regions_mark[mf] == 0:
                front, tail = found_regions[mf]
                match_regions.append(['-', '-', front, tail, 'FP'])
        match_regions_record[imgs_dir] = match_regions
        M += m
        N += n
        A += a
    # NOTE: if one found region can match more than one gt region, TP+FP may be greater than n
    # result of the participant
    if A == 0 or N == 0:
        precision = -1.0
        recall = -1.0
        f_score = -1.0
    else:
        precision = A / N
        recall = A / M
        f_score = 2 * precision * recall / (precision + recall)
    return precision, recall, f_score, (M, N,
                                        A), result_dict, match_regions_record
def calculate_metric_from_dict_MNA(MNA_all):
    """Aggregate per-subject (M, N, A) counts into overall P/R/F1.

    Args:
        MNA_all: dict mapping subject id -> (M, N, A), where M is the number
            of ground-truth regions, N the number of predicted regions and
            A the number of true-positive matches.

    Returns:
        (precision, recall, f_score); (-1.0, -1.0, -1.0) when the metric is
        undefined (N == 0, M == 0 or A == 0).
    """
    M = sum(mna[0] for mna in MNA_all.values())
    N = sum(mna[1] for mna in MNA_all.values())
    A = sum(mna[2] for mna in MNA_all.values())
    try:
        precision = A / N
        recall = A / M
        # F1 divides by precision + recall, which is 0 when A == 0.
        f_score = 2 * precision * recall / (precision + recall)
    except ZeroDivisionError:
        # Was a bare `except:` — only divide-by-zero is an expected failure
        # here; anything else should propagate instead of being swallowed.
        precision = -1.0
        recall = -1.0
        f_score = -1.0
    return precision, recall, f_score
def synchronize():
    """Block until every distributed worker reaches this point.

    Silently does nothing when torch.distributed is unavailable,
    uninitialised, or running as a single process.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return
    if dist.get_world_size() > 1:
        dist.barrier()
def reduce_loss(loss, args):
    """Average a loss tensor across workers; identity when not distributed."""
    if not args.distributed:
        return loss
    return reduce_tensor(loss.data, float(args.world_size))
def synchronize_pred_and_gt(pred_and_gt, epoch, args, remove=True):
    """Gather per-rank prediction dicts into one merged dict on every worker.

    Each rank dumps its partial ``pred_and_gt`` (img_path -> [pred, target])
    to a shared temp dir; rank 0 merges all partials and writes the result
    back, then every other rank reads the merged file.  The ``synchronize``
    barriers keep the mkdir/save/merge/read phases in lockstep.  No-op when
    not running distributed.

    Args:
        pred_and_gt: this rank's partial predictions.
        epoch: current epoch, used to name the temp dir.
        args: needs distributed, save_root, root_runs, local_rank.
        remove: delete the temp dir afterwards (rank 0 only).

    Returns:
        The merged dict (or the input unchanged when not distributed).
    """
    if args.distributed:
        out_dir = osp.join(args.save_root, args.root_runs,
                           'temp_{}'.format(epoch))
        if args.local_rank == 0:
            os.makedirs(out_dir, exist_ok=True)
        synchronize()  # make dir done
        # Every rank writes its own shard, named by rank.
        np.save(
            osp.join(out_dir,
                     'temp_pred_and_gt_{}.npy'.format(args.local_rank)),
            pred_and_gt)
        synchronize()  # save done
        if args.local_rank == 0:
            # Rank 0 merges every shard found in the temp dir.
            pred_and_gt = {}
            for name in os.listdir(out_dir):
                data = np.load(osp.join(out_dir, name),
                               allow_pickle=True).item()
                pred_and_gt.update(data)
            np.save(osp.join(out_dir, 'temp_pred_and_gt_merge.npy'),
                    pred_and_gt)
            synchronize()  # merge done
        else:
            synchronize()  # start read
            pred_and_gt = np.load(osp.join(out_dir,
                                           'temp_pred_and_gt_merge.npy'),
                                  allow_pickle=True).item()
        synchronize()  # read done
        if remove and args.local_rank == 0:
            shutil.rmtree(out_dir)
    return pred_and_gt
def synchronize_f_score(f_score, args):
    """Broadcast rank-0's f_score to every worker; identity in single-process mode."""
    assert isinstance(f_score, float)
    if not args.distributed:
        return f_score
    score_t = torch.tensor(f_score).cuda()
    assert score_t.dtype == torch.float32
    synchronize()  # wait for tensor allocation on all ranks
    dist.broadcast(score_t, src=0)
    return score_t.item()
def synchronize_list(list_obj, args):
    """Broadcast rank-0's list/tuple of ints to every worker; identity otherwise."""
    assert isinstance(list_obj, (list, tuple))
    if not args.distributed:
        return list_obj
    obj_t = torch.tensor(list_obj, dtype=torch.int32).cuda()
    synchronize()  # wait for tensor allocation on all ranks
    dist.broadcast(obj_t, src=0)
    return obj_t.cpu().numpy().tolist()
def delete_records(total_MNA, match_regions_record_all, val_id):
    """Drop one subject's entries from both bookkeeping dicts (in place).

    ``total_MNA`` is keyed directly by subject id; ``match_regions_record_all``
    is keyed by image-dir paths, which belong to the subject either via their
    parent directory name or via the file-name prefix before the first '_'.
    """
    del total_MNA[val_id]
    for key in list(match_regions_record_all.keys()):
        parent_dir = key.split('/')[-2]
        name_prefix = osp.basename(key).split('_')[0]
        if val_id in (parent_dir, name_prefix):
            del match_regions_record_all[key]
    return total_MNA, match_regions_record_all
# SAMM
# Root of the SAMM long-video dataset; labels live directly under the root.
SAMM_ROOT = '/data/gjz_mm21/SAMM'
SAMM_LABEL_DIR = SAMM_ROOT
SAMM_VIDEO_DIR = '/data/gjz_mm21/SAMM/SAMM_longvideos'
# CASME_2
# CASME^2 cropped long-video dataset: root, annotation dir, and frames dir.
CASME_2_ROOT = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped'
CASME_2_LABEL_DIR = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped/labels'
CASME_2_VIDEO_DIR = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped/longVideoFaceCropped'
from unicodedata import name
import cv2
import os
import pdb
import torch
import time
import pywt
import glob
import numpy as np
import os.path as osp
from tqdm import tqdm
from torch.utils.data import Dataset
from torch import nn as nn
from . import params
from . import utils
# Number of 2-D DWT sub-bands kept per frame (approximation + 3 details).
WT_CHANNEL = 4
# Pre-computed Gaussian kernels, loaded once at import time:
# sm_kernel smooths spatially; dr1/dr2 are temporal derivative kernels
# (presumably 1st- and 2nd-order — TODO confirm against the generator).
sm_kernel = np.load(params.GAUSS_KERNEL_PATH['sm_kernel'])
dr1_kernel = np.load(params.GAUSS_KERNEL_PATH['dr1_kernel'])
dr2_kernel = np.load(params.GAUSS_KERNEL_PATH['dr2_kernel'])
# Add singleton H/W axes so the 1-D temporal kernels broadcast over frames.
dr1_kernel = dr1_kernel[:, None, None]
dr2_kernel = dr2_kernel[:, None, None]
class SAMMDataset(Dataset):
    """Sliding-window clip dataset over SAMM long-video frame directories.

    Each item is a clip of ``seq_len`` frames taken with stride ``step``
    from one video, padded with ``time_len // 2`` extra frames on each side
    for the temporal convolution, and returned as
    (temporal coefficients, per-frame CNN features, annos, labels, seq_info).
    """
    def __init__(self,
                 mode,
                 img_dirs,
                 seq_len=64,
                 step=32,
                 time_len=12,
                 input_size=256,
                 data_aug=False,
                 data_option=None,
                 dataset_name='SAMM'):
        super().__init__()
        self.dataset_name = dataset_name
        self.mode = mode
        self.seq_len = seq_len
        self.step = step
        # Test clips must not overlap (each frame appears in exactly one clip).
        assert mode == 'train' or (mode == 'test'
                                   and self.seq_len <= self.step)
        self.time_len = time_len  # observe time_len//2 frames before and after
        # Non-'diff' options run a DWT that halves the resolution, so load at 2x.
        self.size = input_size if data_option == 'diff' else input_size * 2
        self.img_dirs = img_dirs  # imgs files dirs
        if not isinstance(self.img_dirs, list):
            self.img_dirs = [self.img_dirs]
        self.img_ps_dict = self._get_img_ps_dict()
        self.seq_list = self._get_seq_list()
        # Per-video ground truth: frame labels and intensity annotations.
        self.label_dict = np.load(osp.join(params.SAMM_ROOT, 'label_dict.npy'),
                                  allow_pickle=True).item()
        self.anno_dict = np.load(osp.join(params.SAMM_ROOT, 'anno_dict.npy'),
                                 allow_pickle=True).item()
        # print('Load {} clips, {} frames from {}'.format(
        #     len(self.seq_list),
        #     len(self.seq_list) * self.seq_len, dataset_name))
        self.transform = utils.get_group_transform(
            mode) if data_aug else utils.Identity()
        self.data_option = data_option
    def _get_img_ps_dict(self):
        """Map each image dir to the (immutable) tuple of its frame paths."""
        ret_dict = {}
        for img_dir in self.img_dirs:
            img_ps = utils.scan_jpg_from_img_dir(img_dir)
            ret_dict[img_dir] = tuple(img_ps)
        return ret_dict
    def _get_seq_list(self):
        """Enumerate all (img_dir, front, tail) clip windows over every video."""
        ret_list = []
        for img_dir, img_ps in self.img_ps_dict.items():
            front = 0
            tail = front + self.seq_len  # [front, tail), tail not include
            while tail <= len(img_ps):
                ret_list.append([img_dir, front,
                                 tail])  # (img dir, front_idx, tail_idx)
                front += self.step
                tail = front + self.seq_len
        return ret_list
    def __len__(self):
        return len(self.seq_list)
    def __getitem__(self, index):
        img_dir, front, tail = self.seq_list[
            index]  # [front, tail), tail not include
        seq_info = (img_dir, front, tail)
        # insert and append extra imgs for temporal conv
        # (boundary frames are repeated when the window touches a video edge)
        _old_len = len(self.img_ps_dict[img_dir])
        img_ps = list(self.img_ps_dict[img_dir][front:tail])
        for i in range(1, self.time_len // 2 + 1):
            img_ps.insert(0, self.img_ps_dict[img_dir][max(0, front - i)])
            img_ps.append(self.img_ps_dict[img_dir][min(
                _old_len - 1, tail - 1 + i)])
        _cur_len = len(self.img_ps_dict[img_dir])
        assert _old_len == _cur_len  # make sure the dict has not been changed
        # read seqence features, annos and labels
        # (features only for the core seq_len frames, not the padding)
        img_features = np.stack([
            np.load(p.replace('.jpg', '.npy'))
            for p in img_ps[self.time_len // 2:-self.time_len // 2]
        ], 0)
        annos = self.anno_dict[img_dir][front:tail]
        labels = self.label_dict[img_dir][front:tail]
        assert img_features.shape == (self.seq_len, 2048)  # resnet50 features
        # read sequence imgs (padding included), grayscale, square, resized
        flat_imgs = np.empty(
            (self.seq_len + (self.time_len // 2) * 2, self.size, self.size),
            dtype=np.float32)
        for i, p in enumerate(img_ps):
            img = cv2.imread(p, cv2.IMREAD_GRAYSCALE)
            if not img.shape[0] == img.shape[1]:
                # crop to square (centered on the longer axis)
                h, w = img.shape
                wide = abs(h - w) // 2
                if h > w:
                    img = img[wide:wide + w, :]
                else:
                    img = img[:, wide:wide + h]
            try:
                assert img.shape[0] == img.shape[1]
            except:
                print('Error in cropping image {}'.format(p))
            img = cv2.resize(img, (self.size, self.size))
            flat_imgs[i] = img
        # transform
        flat_imgs = self.transform(flat_imgs)
        if self.data_option is not None and 'wt' in self.data_option:
            # One 2-D DWT per frame -> (frames, WT_CHANNEL, size/2, size/2).
            flat_wts = np.stack([dwt2(img) for img in flat_imgs], 0)
        # expand falt imgs: one (time_len + 1)-frame context window per
        # core frame, sliding one frame at a time
        i = 0
        front = 0
        tail = front + self.time_len  # [front, tail], tail include
        if self.data_option is not None and 'wt' in self.data_option:
            seq_wts = np.empty((self.seq_len, self.time_len + 1, WT_CHANNEL,
                                self.size // 2, self.size // 2),
                               dtype=np.float32)
        elif self.data_option == 'diff':
            seq_imgs = np.empty(
                (self.seq_len, self.time_len + 1, self.size, self.size),
                dtype=np.float32)
        while tail < len(flat_imgs):
            if self.data_option is not None and 'wt' in self.data_option:
                seq_wts[i] = flat_wts[front:tail + 1].copy()
            elif self.data_option == 'diff':
                seq_imgs[i] = flat_imgs[front:tail + 1].copy()
            i += 1
            front += 1
            tail += 1
        assert i == self.seq_len
        # data options: turn context windows into the network's input coefs
        if self.data_option == 'diff':
            # Frame-to-frame differences of raw grayscale windows.
            ret_coefs = np.stack([get_diff(imgs) for imgs in seq_imgs], 0)
        elif self.data_option == 'wt_diff':
            # Frame-to-frame differences of DWT coefficients, channels folded.
            ret_coefs = np.stack([get_diff(coefs) for coefs in seq_wts],
                                 0).reshape(self.seq_len,
                                            self.time_len * WT_CHANNEL,
                                            self.size // 2, self.size // 2)
        elif self.data_option == 'wt_dr':
            # Gaussian-smoothed temporal derivative responses per DWT channel.
            ret_coefs = seq_wts.transpose(0, 2, 1, 3, 4)
            ret_coefs = np.asarray([[
                get_smoothing_and_dr_coefs(coefs_dim2)
                for coefs_dim2 in coefs_dim1
            ] for coefs_dim1 in ret_coefs])
            assert ret_coefs.shape[:3] == (self.seq_len, WT_CHANNEL, 3 * 2)
            ret_coefs = ret_coefs.transpose(0, 2, 1, 3, 4)
            ret_coefs = ret_coefs.reshape(self.seq_len, -1, self.size // 2,
                                          self.size // 2)
        elif self.data_option is None:
            print('Require data option...')
            exit()
        else:
            raise NotImplementedError
        ret_coefs = torch.FloatTensor(ret_coefs)
        img_features = torch.FloatTensor(img_features)
        annos = torch.FloatTensor(annos)
        labels = torch.LongTensor(labels)
        return ret_coefs, img_features, annos, labels, seq_info
class CASME_2Dataset(SAMMDataset):
    """SAMMDataset variant that loads CASME^2 label/annotation dictionaries.

    Identical pipeline to ``SAMMDataset``; only the ground-truth dicts are
    replaced after the parent constructor runs.
    """
    def __init__(self,
                 mode,
                 img_dirs,
                 seq_len=64,
                 step=32,
                 time_len=12,
                 input_size=256,
                 data_aug=False,
                 data_option=None,
                 dataset_name='CASME_2'):
        super().__init__(mode,
                         img_dirs,
                         seq_len=seq_len,
                         step=step,
                         time_len=time_len,
                         input_size=input_size,
                         data_aug=data_aug,
                         data_option=data_option,
                         dataset_name=dataset_name)
        # Override the SAMM dicts loaded by the parent with CASME^2 ones.
        self.label_dict = np.load(osp.join(params.CASME_2_LABEL_DIR,
                                           'label_dict.npy'),
                                  allow_pickle=True).item()
        self.anno_dict = np.load(osp.join(params.CASME_2_LABEL_DIR,
                                          'anno_dict.npy'),
                                 allow_pickle=True).item()
class SAMMImageDataset(Dataset):
    """Frame-level dataset over pre-extracted ResNet feature files (SAMM).

    Each item is (2048-d feature tensor, binary label, image path); label is
    1 when the frame path is listed in the dataset's ``bi_label.npy``.
    """
    def __init__(self, img_ps):
        super().__init__()
        self.img_ps = img_ps
        self.bi_label = np.load(
            osp.join(params.SAMM_ROOT, 'bi_label.npy'),
            allow_pickle=True).item()  # imgs_dir -> [<target img_p> ... ]
    def __len__(self):
        return len(self.img_ps)
    def __getitem__(self, index):
        img_p = self.img_ps[index]
        # Features are stored next to the frame with a .npy extension.
        npy_p = img_p.replace('.jpg', '.npy')
        feature = np.load(npy_p)
        feature = torch.tensor(feature, dtype=torch.float32)
        imgs_dir = osp.dirname(img_p)
        label = 1 if img_p in self.bi_label[
            imgs_dir] else 0  # 1 for spotting region
        label = torch.tensor(label, dtype=torch.long)
        return feature, label, img_p
class CASME_2ImageDataset(SAMMImageDataset):
    """SAMMImageDataset variant that reads CASME^2's binary-label dict."""
    def __init__(self, img_ps):
        super().__init__(img_ps)
        # Replace the SAMM dict loaded by the parent with the CASME^2 one.
        self.bi_label = np.load(
            osp.join(params.CASME_2_LABEL_DIR, 'bi_label.npy'),
            allow_pickle=True).item()  # imgs_dir -> [<target img_p> ... ]
def get_diff(imgs):
    """First-order temporal difference along axis 0 (frame axis).

    Accepts a stack of grayscale frames (frames, H, W) or of DWT
    coefficients (frames, WT_CHANNEL, H, W); returns len(imgs) - 1 diffs.
    """
    if imgs.ndim == 3:
        assert imgs.shape[1] == imgs.shape[2]  # imgs
    elif imgs.ndim == 4:
        assert imgs.shape[2] == imgs.shape[
            3] and imgs.shape[1] == WT_CHANNEL  # wt_coefs
    # Equivalent to imgs[1:] - imgs[:-1].
    return np.diff(imgs, axis=0)
def dwt2(img, wave_name='haar'):
    """Single-level 2-D DWT of a grayscale image.

    Returns the four sub-bands stacked as (4, w//2, h//2):
    approximation followed by the three detail bands.
    """
    assert isinstance(img, np.ndarray)
    approx, details = pywt.dwt2(img, wave_name)
    return np.array([approx, *details])  # (4, w//2, h//2)
def get_smoothing_and_dr_coefs(imgs):
    '''
    Spatially smooth each frame with the pre-loaded Gaussian ``sm_kernel``
    (see GAUSS_KERNEL_PATH), then reduce sliding windows of ``dr_ks``
    smoothed frames with the two temporal derivative kernels
    (``dr1_kernel``/``dr2_kernel``).  Returns the dr1 responses followed by
    the dr2 responses stacked along axis 0:
    shape (2 * (len(imgs) - dr_ks + 1), H, W).
    '''
    global sm_kernel, dr1_kernel, dr2_kernel
    # Per-frame spatial smoothing (cv2.filter2D keeps dtype via ddepth=-1).
    sm_imgs = np.array([cv2.filter2D(img, -1, sm_kernel) for img in imgs])
    dr_ks = dr1_kernel.shape[0]
    dr1_res = []
    dr2_res = []
    for i in range(len(imgs) - dr_ks + 1):
        _imgs = sm_imgs[i:i + dr_ks]
        # Weighted temporal sum over the window (kernels broadcast over H, W).
        dr1_res.append((_imgs * dr1_kernel).sum(axis=0))
        dr2_res.append((_imgs * dr2_kernel).sum(axis=0))
    res = np.stack((*dr1_res, *dr2_res), 0)
    return res
| Python | 284 | 36.070423 | 81 | /dataset/me_dataset.py | 0.510163 | 0.495916 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import glob
import os
import os.path as osp
from torch.serialization import load
class MLP(nn.Module):
    """Per-frame MLP head: Dropout -> Linear -> BatchNorm1d -> ReLU per layer.

    ``hidden_units`` is ``[input_dim, h1, ..., 256]``; the last width must be
    256 (the feature size expected by the downstream fusion transform).
    """
    def __init__(self, hidden_units, dropout=0.3):
        super(MLP, self).__init__()
        assert len(hidden_units) - 1 > 0  # at least one layer
        assert hidden_units[-1] == 256
        blocks = []
        for in_dim, out_dim in zip(hidden_units[:-1], hidden_units[1:]):
            blocks += [
                nn.Dropout(dropout),
                nn.Linear(in_dim, out_dim),
                nn.BatchNorm1d(out_dim),
                nn.ReLU(inplace=True),
            ]
        self.mlp = nn.Sequential(*blocks)
    def forward(self, input_tensor):
        """(bs, frames, feat_dim) -> (bs, frames, 256).

        Frames are folded into the batch axis so BatchNorm1d sees 2-D input.
        """
        bs, num_frames, feature_dim = input_tensor.size()
        folded = input_tensor.reshape(bs * num_frames, feature_dim)
        return self.mlp(folded).reshape(bs, num_frames, -1)
class Temporal_Net(nn.Module):
    """CNN over per-frame temporal coefficient maps (diff / wavelet inputs).

    Four stride-2 conv stages (64 -> 128 -> 256 -> 512 channels) followed by
    global average pooling and an MLP.  With ``feature=True`` the forward
    pass returns the penultimate feature; otherwise the (unused) classifier
    head is applied.
    """
    def __init__(self, input_size, num_channels, hidden_units, dropout,
                 feature):
        super().__init__()
        assert input_size in [112, 128, 224, 256]
        self.feature = feature  # return feature before classification
        # 4 layers conv net
        self.conv_net = []
        self.conv_net.append(
            self._make_conv_layer(num_channels, 2**6, stride=2))
        for i in range(7, 10):
            self.conv_net.append(
                self._make_conv_layer(2**(i - 1), 2**i, stride=2))
        self.conv_net = nn.Sequential(*self.conv_net)
        # last_conv_width is only needed by the (commented-out) AvgPool2d;
        # adaptive pooling in forward() makes it unnecessary.
        last_conv_width = input_size // (2**4)
        last_conv_dim = 2**9
        self.dropout = nn.Dropout2d(p=0.2)
        # self.avgpool = nn.AvgPool2d(
        #     kernel_size=[last_conv_width, last_conv_width])
        fc_list = []
        fc_list += [
            nn.Linear(last_conv_dim, hidden_units[0]),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(hidden_units[0]),
            nn.Dropout(dropout)
        ]
        for i in range(0, len(hidden_units) - 2):
            fc_list += [
                nn.Linear(hidden_units[i], hidden_units[i + 1]),
                nn.ReLU(inplace=True),
                nn.BatchNorm1d(hidden_units[i + 1]),
                nn.Dropout(dropout)
            ]
        self.fc = nn.Sequential(*fc_list)
        # not used (kept so existing checkpoints still load)
        final_norm = nn.BatchNorm1d(1, eps=1e-6, momentum=0.1)
        self.classifier = nn.Sequential(
            nn.Linear(hidden_units[-2], hidden_units[-1]), final_norm)
    def _make_conv_layer(self, in_c, out_c, kernel_size=3, stride=2):
        """Two conv+BN+ReLU blocks; the second conv downsamples by `stride`."""
        ks = kernel_size
        conv_layer = nn.Sequential(
            nn.Conv2d(in_c, out_c, kernel_size=(ks, ks), padding=ks // 2),
            nn.BatchNorm2d(out_c,
                           eps=1e-05,
                           momentum=0.1,
                           affine=True,
                           track_running_stats=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_c,
                      out_c,
                      kernel_size=(ks, ks),
                      padding=ks // 2,
                      stride=stride),
            nn.BatchNorm2d(out_c,
                           eps=1e-05,
                           momentum=0.1,
                           affine=True,
                           track_running_stats=True),
            nn.ReLU(inplace=True),
        )
        return conv_layer
    def forward(self, wt_data):
        """(bs, frames, C, W, H) -> (bs*frames, feat) or logits per frame."""
        bs, num_frames, num_channel, W0, H0 = wt_data.size()
        # Fold frames into the batch so the 2-D CNN processes them independently.
        wt_data = wt_data.reshape(bs * num_frames, num_channel, W0, H0)
        conv_out = self.conv_net(wt_data)
        avgpool = F.adaptive_avg_pool2d(conv_out, (1, 1))
        # avgpool = self.avgpool(conv_out)
        avgpool = avgpool.reshape(bs * num_frames, -1)
        out = self.fc(avgpool)
        if self.feature:
            return out
        else:
            out = self.classifier(out)
            return out
class Two_Stream_RNN(nn.Module):
    """Two-stream spotting network: spatial MLP + temporal CNN fused by a GRU.

    Per-frame ResNet features go through ``MLP``; temporal coefficient maps
    go through ``Temporal_Net``; the concatenated 512-d features are reduced
    to 256-d, run through a 2-layer bidirectional GRU, and classified per
    frame into ``outchannel`` outputs.
    """
    def __init__(self,
                 mlp_hidden_units=[2048, 256, 256],
                 dropout=0.3,
                 inchannel=12,
                 size=256,
                 outchannel=4):
        # NOTE: the mutable default list is never mutated, so sharing it
        # across instances is harmless here.
        super().__init__()
        self.mlp = MLP(mlp_hidden_units)
        self.temporal_net = Temporal_Net(size,
                                         inchannel,
                                         hidden_units=[256, 256, 1],
                                         dropout=0.3,
                                         feature=True)
        # Fuses the concatenated spatial+temporal features (512 -> 256).
        self.transform = nn.Sequential(nn.Linear(512, 256),
                                       nn.ReLU(inplace=True),
                                       nn.BatchNorm1d(256),
                                       nn.Dropout(dropout))
        self.rnns = nn.GRU(256,
                           128,
                           bidirectional=True,
                           num_layers=2,
                           dropout=0.3,
                           batch_first=True)
        self.classifier = nn.Sequential(nn.Dropout(dropout),
                                        nn.Linear(256, outchannel),
                                        nn.BatchNorm1d(outchannel), nn.ReLU())
        _init_weights(self)
    def forward(self, temp_data, rgb_data, return_feature=False):
        """Return per-frame outputs of shape (bs, frames, outchannel)."""
        bs, num_frames = rgb_data.size(0), rgb_data.size(1)
        # spatial features
        features_cnn = self.mlp(rgb_data)
        features_spatial = features_cnn.reshape(bs, num_frames, -1)
        # temporal features
        features_temporal = self.temporal_net(temp_data)
        features_temporal = features_temporal.reshape(bs, num_frames, -1)
        features = torch.cat([features_spatial, features_temporal], dim=-1)
        features = self.transform(features.reshape(bs * num_frames, -1))
        features = features.reshape(bs, num_frames, -1)
        # rnn combination
        outputs_rnns, _ = self.rnns(features)
        outputs_rnns = outputs_rnns.reshape(bs * num_frames, -1)
        out = self.classifier(outputs_rnns)
        out = out.reshape(bs, num_frames, -1)
        if return_feature:
            return out
        # anno transforms: log-compress channel 0 (the intensity regression)
        out[..., 0] = torch.log(out[..., 0] + 1)
        return out
class Two_Stream_RNN_Cls(Two_Stream_RNN):
    """Classification head variant of ``Two_Stream_RNN``.

    Replaces the parent's BN+ReLU classifier with a plain Dropout+Linear
    head and skips the log transform on channel 0, returning raw logits.
    """
    def __init__(self,
                 mlp_hidden_units=[2048, 256, 256],
                 dropout=0.3,
                 inchannel=12,
                 size=256,
                 outchannel=2):
        super().__init__(mlp_hidden_units=mlp_hidden_units,
                         dropout=dropout,
                         inchannel=inchannel,
                         size=size,
                         outchannel=outchannel)
        self.classifier = nn.Sequential(nn.Dropout(dropout),
                                        nn.Linear(256, outchannel))
        _init_weights(self)
    def forward(self, temp_data, rgb_data):
        # return_feature=True bypasses the parent's anno log transform.
        out = super().forward(temp_data, rgb_data, return_feature=True)
        return out
class ResNet50_Cls(nn.Module):
    """Small linear head over pre-extracted 2048-d ResNet-50 features."""
    def __init__(self, num_class=2):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(2048, 512),
            nn.Dropout(0.5),
            nn.Linear(512, num_class),
        )
    def forward(self, x):
        assert x.shape[-1] == 2048
        logits = self.fc(x)
        return logits
def _init_weights(model):
for k, m in model.named_modules():
if isinstance(m, (nn.Conv3d, nn.Conv2d, nn.Conv1d)):
nn.init.kaiming_normal_(m.weight,
mode='fan_out',
nonlinearity='relu')
# nn.init.xavier_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm3d, nn.BatchNorm2d, nn.BatchNorm1d)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.Linear)):
nn.init.xavier_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def load_pretrained_model(model, path, load_bn):
    """Load a pretrained checkpoint into ``model`` with key adaptation.

    Renames legacy 'wt_net' keys to 'temporal_net', optionally drops all
    BatchNorm parameters/buffers (``load_bn=False``), always drops the
    classifier head, and loads the remaining weights non-destructively on
    top of the model's current state dict.

    Args:
        model: target nn.Module (modified in place).
        path: checkpoint file containing a 'state_dict' entry.
        load_bn: keep BatchNorm weights/stats from the checkpoint when True.

    Returns:
        The same ``model`` instance.
    """
    model_dict = model.state_dict()
    state_dict = torch.load(path, map_location='cpu')['state_dict']
    # Legacy checkpoints named the temporal branch 'wt_net'.
    state_dict = {
        k.replace('wt_net', 'temporal_net', 1): v
        for k, v in state_dict.items()
    }
    # bn filter: locate BN modules via their running_mean buffer, then drop
    # every associated parameter/buffer of that module.
    if not load_bn:
        bn_keys = []
        for k in state_dict.keys():
            if 'running_mean' in k:
                bn_name = '.'.join(k.split('.')[:-1])
                for name in [
                        'weight', 'bias', 'running_mean', 'running_var',
                        'num_batches_tracked'
                ]:
                    bn_keys.append(bn_name + '.' + name)
        state_dict = {k: v for k, v in state_dict.items() if k not in bn_keys}
    # # module name rank adjust
    # for k, v in state_dict.items():
    #     if 'mlp.mlp.5' in k:
    #         state_dict[k.replace('mlp.mlp.5', 'mlp.mlp.4')] = v
    #         del state_dict[k]
    #     if 'temporal_net.fc.4' in k:
    #         state_dict[k.replace('temporal_net.fc.4',
    #                              'temporal_net.fc.3')] = v
    #         del state_dict[k]
    # classifier filter: the head is task-specific and never transferred.
    state_dict = {k: v for k, v in state_dict.items() if 'classifier' not in k}
    model_dict.update(state_dict)
    model.load_state_dict(model_dict)
    return model
| Python | 264 | 35.21212 | 79 | /model/network.py | 0.494351 | 0.472803 |
import argparse
# Command-line configuration shared by the training / evaluation entry points.
parser = argparse.ArgumentParser(description="x")
parser.add_argument('--store_name', type=str, default="")
parser.add_argument('--save_root', type=str, default="")
parser.add_argument('--tag', type=str, default="")
parser.add_argument('--snap', type=str, default="")
parser.add_argument('--dataset',
                    type=str,
                    default="",
                    choices=['SAMM', 'CASME_2'])
parser.add_argument('--data_aug', action='store_true')
parser.add_argument('--distributed', action='store_true')
parser.add_argument('--amp', action='store_true')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument("--seed", default=111, type=int)
parser.add_argument('--finetune_list',
                    default=[],
                    type=str,
                    nargs="+",
                    help='finetune subjects')
parser.add_argument("--patience",
                    default=15,
                    type=int,
                    help='front extend patience')
# ========================= Model Configs ==========================
parser.add_argument('--hidden_units',
                    default=[2048, 256, 256],
                    type=int,
                    nargs="+",
                    help='hidden units set up')
parser.add_argument('--length', type=int, default=64)
parser.add_argument('--step', type=int, default=64)
parser.add_argument('-L',
                    type=int,
                    default=12,
                    help='the number of input difference images')
parser.add_argument('--input_size', type=int, default=112)
parser.add_argument('--data_option',
                    type=str,
                    choices=['diff', 'wt_diff', 'wt_dr'])
# ========================= Learning Configs ==========================
parser.add_argument('--epochs',
                    default=25,
                    type=int,
                    metavar='N',
                    help='number of total epochs to run')
parser.add_argument(
    '--early_stop', type=int,
    default=3)  # if validation loss didn't improve over 3 epochs, stop
parser.add_argument('-b',
                    '--batch_size',
                    default=16,
                    type=int,
                    metavar='N',
                    help='mini-batch size (default: 16)')
parser.add_argument('--lr', default=1e-2, type=float)
parser.add_argument('--lr_decay_factor', default=0.1, type=float)
parser.add_argument('--lr_steps',
                    default=[2, 5],
                    type=float,
                    nargs="+",
                    metavar='LRSteps',
                    help='epochs to decay learning rate by factor')
parser.add_argument('--optim', default='SGD', type=str)
parser.add_argument('--momentum',
                    default=0.9,
                    type=float,
                    metavar='M',
                    help='momentum')
parser.add_argument('--weight-decay',
                    '--wd',
                    default=5e-4,
                    type=float,
                    metavar='W',
                    help='weight decay (default: 5e-4)')
parser.add_argument('--clip-gradient',
                    '--gd',
                    default=20,
                    type=float,
                    metavar='W',
                    help='gradient norm clipping (default: 20)')
parser.add_argument('--focal_alpha', default=[1., 1.], type=float, nargs="+")
# ========================= Monitor Configs ==========================
parser.add_argument('--print-freq',
                    '-p',
                    default=50,
                    type=int,
                    metavar='N',
                    help='print frequency (default: 50) iteration')
parser.add_argument('--eval-freq',
                    '-ef',
                    default=1,
                    type=int,
                    metavar='N',
                    help='evaluation frequency (default: 1) epochs')
# ========================= Runtime Configs ==========================
parser.add_argument('-j',
                    '--workers',
                    default=0,
                    type=int,
                    metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--resume',
                    default='',
                    type=str,
                    metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--delete_last',
                    action='store_true',
                    help='delete the last recorded subject')
parser.add_argument('-t',
                    '--test',
                    dest='test',
                    action='store_true',
                    help='evaluate model on test set')
parser.add_argument('--start-epoch',
                    default=0,
                    type=int,
                    metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('--gpus', type=str, default=None)
parser.add_argument('--root_log', type=str, default='log')
parser.add_argument('--root_model', type=str, default='model')
parser.add_argument('--root_output', type=str, default='output')
parser.add_argument('--root_runs', type=str, default='runs')
parser.add_argument('--load_pretrained', type=str, default='')
parser.add_argument('--load_bn', action='store_true')
| Python | 132 | 39.946968 | 77 | /config.py | 0.477336 | 0.46605 |
# CASME_2
# CASME^2 cropped long-video dataset: root, annotation dir, and frames dir.
CASME_2_ROOT = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped'
CASME_2_LABEL_DIR = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped/labels'
CASME_2_VIDEO_DIR = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped/longVideoFaceCropped'
# SAMM
SAMM_ROOT = '/data/gjz_mm21/SAMM'
SAMM_VIDEO_DIR = '/data/gjz_mm21/SAMM/SAMM_longvideos'
# resnet50 features
MODEL_DIR = '/home/gjz/fmr_backbone/pytorch-benchmarks/ferplus'
import pandas as pd
import numpy as np
import os.path as osp
# Which challenge track to package, and where the experiment outputs live.
dataset = 'CASME_2'
# dataset = 'SAMM'
submit_name = 'submit_{}.csv'.format(dataset)
result_dir_name = 'results'
submit_npy_name = 'match_regions_record_all.npy'
submit_id = 'done_exp_cls_ca_20210708-215035'  # experiment run to export
def convert_key(k, dataset):
    """Map a full image-dir path to the id used in the submission CSV.

    CASME_2 keeps only the first 7 characters of the basename; SAMM keeps
    the whole basename.
    """
    base = osp.basename(k)
    if dataset == 'CASME_2':
        return base[:7]
    if dataset == 'SAMM':
        return base
    raise NotImplementedError
# Load the recorded match regions (imgs_dir -> [[onset, offset, pred_onset,
# pred_offset, TP/FP/FN], ...]) for the chosen experiment run.
data = np.load(osp.join('.', result_dir_name, submit_id, 'output',
                        submit_npy_name),
               allow_pickle=True).item()
metric = {'TP': 0, 'FN': 0, 'FP': 0}
with open(submit_name, 'w') as f:
    # First line is the challenge track id expected by the evaluation server.
    if dataset == 'CASME_2':
        f.write('2\r\n')
    elif dataset == 'SAMM':
        f.write('1\r\n')
    else:
        raise NotImplementedError
    for k, v in data.items():
        k = convert_key(k, dataset)
        assert isinstance(v[0], list)
        # One CSV row per region; also tally TP/FP/FN from the last column.
        for line in v:
            f.write(','.join([k, *[str(x) for x in line]]) + '\r\n')
            metric[line[-1]] += 1
# Sanity-report the metrics implied by the exported rows.
precision = metric['TP'] / (metric['TP'] + metric['FP'])
recall = metric['TP'] / (metric['TP'] + metric['FN'])
f_score = 2 * precision * recall / (precision + recall)
print('TP: {}, FP: {}, FN: {}'.format(metric['TP'], metric['FP'],
                                      metric['FN']))
print('P: {:.4f}, R: {:.4f}, F: {:.4f}'.format(precision, recall, f_score))
| Python | 47 | 29.702127 | 75 | /submit.py | 0.544006 | 0.523216 |
import time
from matplotlib.pyplot import winter
import torch
import torch.nn.functional as F
import numpy as np
import utils
import dataset.utils as dataset_utils
import dataset.params as DATASET_PARAMS
def train(dataloader, model, criterion, optimizer, epoch, logger, args,
          amp_autocast, loss_scaler):
    """Run one training epoch of the frame-level classifier.

    Args:
        dataloader: yields (temp_data, img_features, annos, labels, seq_info).
        model: Two_Stream_RNN_Cls-style network (temp_data, img_features) -> logits.
        criterion: frame-level classification loss.
        optimizer: optimizer stepped once per batch.
        epoch: current epoch (for logging only).
        logger: logger used on rank 0.
        args: run configuration (length, print_freq, local_rank, ...).
        amp_autocast: autocast context factory (or nullcontext when AMP off).
        loss_scaler: AMP loss scaler, or None for plain backward/step.
    """
    batch_time = utils.AverageMeter()
    data_time = utils.AverageMeter()
    losses = utils.AverageMeter()
    end = time.time()
    model.train()
    for i, data_batch in enumerate(dataloader):
        data_time.update(time.time() - end)
        temp_data, img_features, annos, labels, _ = data_batch
        batch_size = temp_data.shape[0]
        # # TODO: skip all zero samples
        # if (labels == 0).all() and np.random.rand() <= 0.7:
        #     end = time.time()
        #     # print('skip all zeros batch...')
        #     continue
        # keep_ids = []
        # for bi in range(batch_size):
        #     if not ((labels[bi] == 0).all() and np.random.rand() <= 0.5):
        #         keep_ids.append(bi)
        # # print('skip {} samples...'.format(batch_size - len(keep_ids)))
        # batch_size = len(keep_ids)  # m batch_size
        # if batch_size == 0:
        #     end = time.time()
        #     # print('skip all zeros batch...')
        #     continue
        # keep_ids = np.asarray(keep_ids)
        # temp_data = temp_data[keep_ids]
        # img_features = img_features[keep_ids]
        # annos = annos[keep_ids]
        # labels = labels[keep_ids]
        # label preprocess: collapse macro/micro into a single positive class
        labels[labels > 0] = 1  # 1, 2 -> 1
        temp_data = temp_data.cuda()
        img_features = img_features.cuda()
        # annos = annos.cuda()
        labels = labels.cuda()
        with amp_autocast():
            out = model(temp_data, img_features)
        # flat labels: (bs, length, C) -> (bs*length, C) for the criterion
        # NOTE(review): the loss is computed outside the autocast context —
        # confirm this is intended for AMP runs.
        out = out.reshape(batch_size * args.length, -1)
        labels = labels.reshape(-1)
        loss = criterion(out, labels)
        # backward + step
        optimizer.zero_grad()
        if loss_scaler is None:
            loss.backward()
            optimizer.step()
        else:
            loss_scaler(loss, optimizer)
        # distributed reduce
        utils.reduce_loss(loss, args)
        losses.update(loss.item(), temp_data.size(0))
        batch_time.update(time.time() - end)
        if args.local_rank == 0 and (i % args.print_freq == 0
                                     or i == len(dataloader) - 1):
            output = ('Epoch: [{0}][{1}/{2}], lr: {lr:.5f}\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                          epoch,
                          i + 1,
                          len(dataloader),
                          batch_time=batch_time,
                          data_time=data_time,
                          loss=losses,
                          lr=optimizer.param_groups[-1]['lr']))
            logger.info(output)
        torch.cuda.synchronize()
        end = time.time()
def validate(dataloader, model, criterion, logger, args, amp_autocast):
    """Run validation and collect per-frame predictions.

    Returns:
        (avg_loss, pred_and_gt) where pred_and_gt maps each frame path to
        [predicted_label, target_label] (both binary ints).
    """
    batch_time = utils.AverageMeter()
    losses = utils.AverageMeter()
    model.eval()
    end = time.time()
    # outs = []
    # annos = []
    # labels = []
    # pred_anno_dict = {}  # imgs_dir -> anno values
    # pred_label_dict = {}  # imgs_dir -> labels
    # anno_dict = {}
    # label_dict = {}
    pred_and_gt = {}  # img_p -> [pred, target]
    for i, data_batch in enumerate(dataloader):
        temp_data, img_features, annos, labels, seq_info = data_batch
        # label preprocess: collapse macro/micro into a single positive class
        labels[labels > 0] = 1  # 1, 2 -> 1
        batch_size = labels.shape[0]
        temp_data = temp_data.cuda()
        img_features = img_features.cuda()
        # annos = annos.cuda()
        labels = labels.cuda()
        with torch.no_grad():
            with amp_autocast():
                out = model(temp_data, img_features)
            loss = criterion(out.reshape(batch_size * args.length, -1),
                             labels.reshape(-1)).float()
        # Only well-defined losses contribute to the running average / logs.
        if not torch.isnan(loss).any():
            # distributed reduce
            utils.reduce_loss(loss, args)
            losses.update(loss.item(), temp_data.size(0))
            batch_time.update(time.time() - end)
            if args.local_rank == 0 and (i % args.print_freq == 0
                                         or i == len(dataloader) - 1):
                output = ('Val: [{0}/{1}]\t'
                          'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                              i + 1,
                              len(dataloader),
                              batch_time=batch_time,
                              loss=losses))
                logger.info(output)
            torch.cuda.synchronize()
        # record: map every clip frame back to its image path
        img_dirs, fronts, tails = seq_info
        for batch_idx in range(batch_size):
            img_dir = img_dirs[batch_idx]
            front = fronts[batch_idx].item()
            tail = tails[batch_idx].item()
            # assert batch_size == 1, 'batch size should be 1'
            img_dir_ps = dataset_utils.scan_jpg_from_img_dir(img_dir)
            # if not img_dir in pred_label_dict:
            #     pred_anno_dict[img_dir] = np.zeros(len(img_dir_ps))
            #     pred_label_dict[img_dir] = np.zeros(len(img_dir_ps))
            #     anno_dict = [img_dir] = np.zeros(len(img_dir_ps))
            #     label_dict = [img_dir] = np.zeros(len(img_dir_ps))
            pred_label = torch.argmax(out[batch_idx], dim=-1).reshape(-1)
            label = labels[batch_idx].reshape(-1)
            for j in range(front, tail):
                img_p = img_dir_ps[j]
                pred_and_gt[img_p] = [
                    pred_label[j - front].item(), label[j - front].item()
                ]
            # pred_anno_dict[img_dir][front:tail] += pred_annos
            # assert (pred_label_dict[img_dir][front:tail] == 0
            #         ).all(), 'should be no overlap'
            # pred_label_dict[img_dir][front:tail] += pred_labels
            # anno_dict[img_dir][front:tail] += annos
            # label_dict[img_dir][front:tail] += labels
        end = time.time()
    return losses.avg, pred_and_gt
| Python | 173 | 36.410404 | 78 | /trainer_cls.py | 0.4983 | 0.489648 |
'''
Generate per-frame emotion-intensity values and macro/micro spotting labels for each frame of the long videos.
'''
# %%
import pdb
import os
import os.path as osp
from numpy.core.numeric import ones, ones_like
from numpy.lib.function_base import percentile
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import params
# %% ID2NAME and NAME2ID
# CASME_2_PID2NAME/NAME2PID
df = pd.read_csv(osp.join(params.CASME_2_LABEL_DIR, 'naming_rule1.csv'))
data = df.values
CASME_2_PID2NAME = {str(line[-1]): str(line[1]) for line in data}
CASME_2_NAME2PID = {str(line[1]): str(line[-1]) for line in data}
del df
del data
# CASME_2_VID2NAME
df = pd.read_csv(osp.join(params.CASME_2_LABEL_DIR, 'naming_rule2.csv'))
data = df.values
CASME_2_VID2NAME = {'{:04d}'.format(line[0]): str(line[1]) for line in data}
CASME_2_NAME2VID = {str(line[1]): '{:04d}'.format(line[0]) for line in data}
del df
del data
save_dict_dir = osp.join(params.CASME_2_ROOT, 'ID2NAME2ID')
os.makedirs(save_dict_dir, exist_ok=True)
for p, d in zip(
['pid2name', 'name2pid', 'vid2name', 'name2vid'],
[CASME_2_PID2NAME, CASME_2_NAME2PID, CASME_2_VID2NAME, CASME_2_NAME2VID]):
np.save(osp.join(save_dict_dir, p + '.npy'), d)
# %% main
anno_dict = {}
label_dict = {} # 0: none, 1: macro, 2: micro
pred_gt = {} # [[onset, offset, label],...]
bi_label_dict = {} # store all the img_ps fall into the spotting interval
df = pd.read_csv(osp.join(params.CASME_2_LABEL_DIR, 'CASFEcode_final.csv'))
data = df.values
for row in data:
# construct imgs dir for current row data
pid = str(row[0])
vname = row[1].split('_')[0]
pname = CASME_2_PID2NAME[pid]
vid = CASME_2_NAME2VID[vname]
name_code = pname[1:]
imgs_file_head = name_code + '_' + vid
for file_name in os.listdir(osp.join(params.CASME_2_VIDEO_DIR, pname)):
if file_name.startswith(imgs_file_head):
imgs_dir = osp.join(params.CASME_2_VIDEO_DIR, pname, file_name)
break
# update emotion intensity and label
imgs_name = [
name
for name in sorted(os.listdir(imgs_dir),
key=lambda x: int(x.split('.')[0].split('_')[-1]))
if '.jpg' in name
] # first img name: img_1.jpg
onset, apex, offset = row[2:2 + 3]
onset, apex, offset = int(onset), int(apex), int(offset)
if onset > 0 and apex > 0 and offset > 0:
pass
elif onset > 0 and apex > 0 and offset == 0:
offset = min(len(imgs_name), apex + (apex - onset))
elif onset > 0 and apex == 0 and offset > 0:
apex = (onset + offset) // 2
else:
raise Exception
try:
assert onset < apex and apex < offset
except:
print('[Error][{}] onset: {}, apex: {}, offset: {}, '.format(
imgs_dir, onset, apex, offset))
continue # skip this row
if not imgs_dir in anno_dict:
anno_dict[imgs_dir] = np.zeros(len(imgs_name))
label_dict[imgs_dir] = np.zeros(len(imgs_name))
pred_gt[imgs_dir] = []
bi_label_dict[imgs_dir] = []
# convert start index from 1 to 0
onset -= 1
apex -= 1
offset -= 1
# intensity
sigma = min(offset - apex, apex - onset) // 2
mu = apex
func = lambda x: np.exp(-(x - mu)**2 / 2 / sigma / sigma
) / sigma / np.sqrt(2 * np.pi)
# func = lambda x: (x - onset) / (apex - onset) if x >= apex else (
# offset - x) / (offset - apex)
cumsum = 0
for i in range(onset, offset + 1):
anno_dict[imgs_dir][i] += func(i)
cumsum += anno_dict[imgs_dir][i]
if cumsum < 0:
pdb.set_trace()
# print('onset2offset cumsum: {:.2f}'.format(cumsum))
# label
label_dict[imgs_dir][onset:offset +
1] = 1 if 'macro' in str(row[-2]).lower() else 2
# pred_gt
pred_gt[imgs_dir].append(
[onset, offset + 1, 1 if 'macro' in str(row[-2]).lower() else 2])
# bi_label
bi_label_dict[imgs_dir].extend(
[osp.join(imgs_dir, name) for name in imgs_name[onset:offset + 1]])
np.save(osp.join(params.CASME_2_LABEL_DIR, 'anno_dict.npy'), anno_dict)
np.save(osp.join(params.CASME_2_LABEL_DIR, 'label_dict.npy'), label_dict)
np.save(osp.join(params.CASME_2_LABEL_DIR, 'pred_gt.npy'), pred_gt)
np.save(osp.join(params.CASME_2_LABEL_DIR, 'bi_label.npy'), bi_label_dict)
# %% visulization
# fig = plt.figure(figsize=(30, 50))
# for i, (k, v) in enumerate(anno_dict.items()):
# fig.add_subplot((len(anno_dict) - 1) // 5 + 1, 5, i + 1)
# plt.plot(v)
# fig.tight_layout()
# plt.savefig('./CASME_2_annos.pdf')
# plt.show()
column = 5
fig = plt.figure(figsize=(30, ((len(label_dict) - 1) // column + 1) * 2))
for i, (k, v) in enumerate(label_dict.items()):
v[v > 0] = 1 # 1,2 -> 1
fig.add_subplot((len(label_dict) - 1) // column + 1, column, i + 1)
plt.plot(v, 'r-')
plt.title(osp.basename(k))
fig.tight_layout()
out_dir = './preprocess'
plt.savefig(osp.join(out_dir, 'ca_bi_label.pdf'))
plt.close('all')
# %%
| Python | 150 | 32.060001 | 78 | /preprocess/casme_2_label_generation.py | 0.597096 | 0.571486 |
guanjz20/MM21_FME_solution | refs/heads/master | '''
generate the emotion intensity of each frame
'''
# %%
import os
import pdb
import os.path as osp
from numpy.core.numeric import ones
from numpy.lib.function_base import percentile
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import params
# %% main
anno_dict = {} # intensity
label_dict = {} # 0: none, 1: macro, 2: micro
pred_gt = {} # [[onset, offset, label],...]
bi_label_dict = {} # store all the img_ps fall into the spotting interval
df = pd.read_csv(osp.join(params.SAMM_ROOT, 'SAMM_labels.csv'))
data = df.values
for row in data:
# construct imgs dir for current row data
file_name = row[1][:5]
imgs_dir = osp.join(params.SAMM_VIDEO_DIR, file_name)
assert osp.exists(imgs_dir)
# update emotion intensity and label
imgs_name = [
name
for name in sorted(os.listdir(imgs_dir),
key=lambda x: int(x.split('.')[0].split('_')[-1]))
if '.jpg' in name
] # first img name: xxx_x_0001.jpg
onset, apex, offset = row[3:3 + 3]
onset, apex, offset = int(onset), int(apex), int(offset)
if onset > 0 and apex > 0 and offset > 0:
pass
elif onset > 0 and apex > 0 and offset == -1:
offset = min(len(imgs_name), apex + (apex - onset))
elif onset > 0 and apex == -1 and offset > 0:
apex = (onset + offset) // 2
else:
raise Exception
try:
assert onset < apex and apex < offset
except:
print('[Error][{}] onset: {}, apex: {}, offset: {}, '.format(
imgs_dir, onset, apex, offset))
continue # skip this row
if not imgs_dir in anno_dict:
anno_dict[imgs_dir] = np.zeros(len(imgs_name))
label_dict[imgs_dir] = np.zeros(len(imgs_name))
pred_gt[imgs_dir] = []
bi_label_dict[imgs_dir] = []
# convert start index from 1 to 0
onset -= 1
apex -= 1
offset -= 1
# intensity
sigma = min(offset - apex, apex - onset) // 2 + 1e-7
if sigma <= 0:
pdb.set_trace()
mu = apex
func = lambda x: np.exp(-(x - mu)**2 / 2 / sigma / sigma
) / sigma / np.sqrt(2 * np.pi)
cumsum = 0
for i in range(onset, offset + 1):
anno_dict[imgs_dir][i] += func(i)
cumsum += anno_dict[imgs_dir][i]
# print('onset2offset cumsum: {:.2f}'.format(cumsum))
# label
label_dict[imgs_dir][onset:offset +
1] = 1 if 'macro' in str(row[-2]).lower() else 2
# pred_gt
pred_gt[imgs_dir].append(
[onset, offset + 1, 1 if 'macro' in str(row[-2]).lower() else 2])
# bi_label
bi_label_dict[imgs_dir].extend(
[osp.join(imgs_dir, name) for name in imgs_name[onset:offset + 1]])
np.save(osp.join(params.SAMM_ROOT, 'anno_dict.npy'), anno_dict)
np.save(osp.join(params.SAMM_ROOT, 'label_dict.npy'), label_dict)
np.save(osp.join(params.SAMM_ROOT, 'pred_gt.npy'), pred_gt)
np.save(osp.join(params.SAMM_ROOT, 'bi_label.npy'), bi_label_dict)
# %% visulization
# fig = plt.figure(figsize=(30, 50))
# for i, (k, v) in enumerate(anno_dict.items()):
# fig.add_subplot((len(anno_dict) - 1) // 5 + 1, 5, i + 1)
# plt.plot(v)
# fig.tight_layout()
# plt.savefig('./SAMM_annos.pdf')
# plt.show()
column = 5
fig = plt.figure(figsize=(30, ((len(label_dict) - 1) // column + 1) * 2))
for i, (k, v) in enumerate(label_dict.items()):
v[v > 0] = 1 # 1,2 -> 1
fig.add_subplot((len(label_dict) - 1) // column + 1, column, i + 1)
plt.plot(v, 'r-')
plt.title(osp.basename(k))
fig.tight_layout()
out_dir = './preprocess'
plt.savefig(osp.join(out_dir, 'sa_bi_label.pdf'))
plt.close('all')
# %%
| Python | 116 | 30.413794 | 77 | /preprocess/samm_2_label_generation.py | 0.580955 | 0.561197 |
guanjz20/MM21_FME_solution | refs/heads/master | from __future__ import division
from typing import Iterable
import cv2
import os
import time
import six
import sys
from tqdm import tqdm
import argparse
import pickle
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import torch.utils.data
import os.path as osp
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from glob import glob
import numbers
from PIL import Image, ImageOps
import random
import params
# for torch lower version
import torch._utils
from torch.nn import functional as F
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride,
requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size,
stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
global parsed
import torch.utils.data as data
# multi thread setting
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
cv2.ocl.setUseOpenCL(False)
cv2.setNumThreads(0)
class SAMMDataset(data.Dataset):
def __init__(self, data_root, transform=None):
super().__init__()
self.img_ps = glob(osp.join(data_root, '*/*.jpg'))
self.transform = transform
def __len__(self):
return len(self.img_ps)
def __getitem__(self, index):
img = Image.open(self.img_ps[index]).convert('RGB')
img = self.transform(img) if self.transform is not None else img
return img, self.img_ps[index]
class CASME_2Dataset(SAMMDataset):
def __init__(self, data_root, transform=None):
super().__init__(data_root, transform)
self.img_ps = glob(osp.join(data_root, '*/*/*.jpg'))
def load_module_2or3(model_name, model_def_path):
"""Load model definition module in a manner that is compatible with
both Python2 and Python3
Args:
model_name: The name of the model to be loaded
model_def_path: The filepath of the module containing the definition
Return:
The loaded python module."""
if six.PY3:
import importlib.util
spec = importlib.util.spec_from_file_location(model_name,
model_def_path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
else:
import importlib
dirname = os.path.dirname(model_def_path)
sys.path.insert(0, dirname)
module_name = os.path.splitext(os.path.basename(model_def_path))[0]
mod = importlib.import_module(module_name)
return mod
def load_model(model_name, MODEL_DIR):
"""Load imoprted PyTorch model by name
Args:
model_name (str): the name of the model to be loaded
Return:
nn.Module: the loaded network
"""
model_def_path = osp.join(MODEL_DIR, model_name + '.py')
weights_path = osp.join(MODEL_DIR, model_name + '.pth')
mod = load_module_2or3(model_name, model_def_path)
func = getattr(mod, model_name)
net = func(weights_path=weights_path)
return net
def compose_transforms(meta,
resize=256,
center_crop=True,
override_meta_imsize=False):
"""Compose preprocessing transforms for model
The imported models use a range of different preprocessing options,
depending on how they were originally trained. Models trained in MatConvNet
typically require input images that have been scaled to [0,255], rather
than the [0,1] range favoured by PyTorch.
Args:
meta (dict): model preprocessing requirements
resize (int) [256]: resize the input image to this size
center_crop (bool) [True]: whether to center crop the image
override_meta_imsize (bool) [False]: if true, use the value of `resize`
to select the image input size, rather than the properties contained
in meta (this option only applies when center cropping is not used.
Return:
(transforms.Compose): Composition of preprocessing transforms
"""
normalize = transforms.Normalize(mean=meta['mean'], std=meta['std'])
im_size = meta['imageSize']
assert im_size[0] == im_size[1], 'expected square image size'
if center_crop:
transform_list = [
transforms.Resize(resize),
transforms.CenterCrop(size=(im_size[0], im_size[1]))
]
else:
if override_meta_imsize:
im_size = (resize, resize)
transform_list = [transforms.Resize(size=(im_size[0], im_size[1]))]
transform_list += [transforms.ToTensor()]
if meta['std'] == [1, 1, 1]: # common amongst mcn models
transform_list += [lambda x: x * 255.0]
transform_list.append(normalize)
return transforms.Compose(transform_list)
def augment_transforms(meta,
resize=256,
random_crop=True,
override_meta_imsize=False):
normalize = transforms.Normalize(mean=meta['mean'], std=meta['std'])
im_size = meta['imageSize']
assert im_size[0] == im_size[1], 'expected square image size'
if random_crop:
v = random.random()
transform_list = [
transforms.Resize(resize),
RandomCrop(im_size[0], v),
RandomHorizontalFlip(v)
]
else:
if override_meta_imsize:
im_size = (resize, resize)
transform_list = [transforms.Resize(size=(im_size[0], im_size[1]))]
transform_list += [transforms.ToTensor()]
if meta['std'] == [1, 1, 1]: # common amongst mcn models
transform_list += [lambda x: x * 255.0]
transform_list.append(normalize)
return transforms.Compose(transform_list)
class RandomCrop(object):
def __init__(self, size, v):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.v = v
def __call__(self, img):
w, h = img.size
th, tw = self.size
x1 = int((w - tw) * self.v)
y1 = int((h - th) * self.v)
#print("print x, y:", x1, y1)
assert (img.size[0] == w and img.size[1] == h)
if w == tw and h == th:
out_image = img
else:
out_image = img.crop(
(x1, y1, x1 + tw, y1 +
th)) #same cropping method for all images in the same group
return out_image
class RandomHorizontalFlip(object):
"""Randomly horizontally flips the given PIL.Image with a probability of 0.5
"""
def __init__(self, v):
self.v = v
return
def __call__(self, img):
if self.v < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
#print ("horiontal flip: ",self.v)
return img
def get_vec(model, layer_name, image):
bs = image.size(0)
if parsed.layer_name == 'pool5_full':
layer_name = 'pool5'
layer = model._modules.get(layer_name)
if parsed.layer_name == 'fc7':
layer_output_size = 4096
my_embedding = torch.zeros(bs, layer_output_size)
elif parsed.layer_name == 'fc8':
my_embedding = torch.zeros(bs, 7)
elif parsed.layer_name == 'pool5' or parsed.layer_name == 'pool5_full':
my_embedding = torch.zeros([bs, 512, 7, 7])
elif parsed.layer_name == 'pool4':
my_embedding = torch.zeros([bs, 512, 14, 14])
elif parsed.layer_name == 'pool3':
my_embedding = torch.zeros([bs, 256, 28, 28])
elif parsed.layer_name == 'pool5_7x7_s1':
my_embedding = torch.zeros([bs, 2048, 1, 1])
elif parsed.layer_name == 'conv5_3_3x3_relu':
my_embedding = torch.zeros([bs, 512, 7, 7])
def copy_data(m, i, o):
my_embedding.copy_(o.data)
h = layer.register_forward_hook(copy_data)
h_x = model(image)
h.remove()
if parsed.layer_name == 'pool5' or parsed.layer_name == 'conv5_3_3x3_relu':
GAP_layer = nn.AvgPool2d(kernel_size=[7, 7], stride=(1, 1))
my_embedding = GAP_layer(my_embedding)
return F.relu(my_embedding.squeeze())
def get_frame_index(frame_path):
frame_name = frame_path.split('/')[-1]
frame_num = int(frame_name.split('.')[0].split('_')[-1])
return frame_num
def predict(data_loader, layer_name, model, des_dir):
with torch.no_grad():
for ims, img_path in tqdm(data_loader):
ims = ims.cuda()
output = get_vec(model, layer_name, ims)
if not len(output.shape) == 2:
output = [
output,
]
img_path = [
img_path,
]
for feature, path in zip(output, img_path):
basename = osp.basename(path)
des_basename = basename.split('.')[0] + '.npy'
des_path = path.replace(basename, des_basename)
np.save(des_path, feature)
def feature_extraction(model, loader, des_dir):
model.eval()
predict(loader, parsed.layer_name, model, des_dir)
def main():
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
MODEL_DIR = params.MODEL_DIR
model_name = 'resnet50_ferplus_dag'
model = load_model(model_name, MODEL_DIR)
model = model.cuda()
meta = model.meta
preproc_transforms = compose_transforms(
meta, center_crop=False) if not parsed.augment else augment_transforms(
meta, random_crop=True)
if parsed.dataset == 'SAMM':
dataset = SAMMDataset(params.SAMM_VIDEO_DIR, preproc_transforms)
# parsed.save_root = params.SAMM_FEATURE_DIR
elif parsed.dataset == 'CASME_2':
dataset = CASME_2Dataset(params.CASME_2_VIDEO_DIR, preproc_transforms)
# parsed.save_root = params.CASME_2_FEATURE_DIR
else:
raise NotImplementedError
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=4,
num_workers=0,
pin_memory=False)
des_dir = None
# des_dir = osp.join(
# parsed.save_root, '_'.join([
# '{}_features'.format(model_name), 'fps=' + str(parsed.fps),
# parsed.layer_name
# ]))
# os.makedirs(des_dir, exist_ok=True)
feature_extraction(model, data_loader, des_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run.')
parser.add_argument('--refresh',
dest='refresh',
action='store_true',
help='refresh feature cache')
parser.add_argument('--fps',
type=int,
default=0,
help='frames per second to extract')
parser.add_argument('--layer_name', type=str, default='pool5_7x7_s1')
parser.add_argument(
'--augment',
action="store_true",
help='whether to extract augmented features for train set only ')
parser.add_argument('--dataset', type=str, default='')
parsed = parser.parse_args()
parsed.dataset = 'SAMM'
main()
| Python | 341 | 32.498535 | 80 | /preprocess/CNN_feature_extraction.py | 0.591701 | 0.578044 |
guanjz20/MM21_FME_solution | refs/heads/master | import os
import os.path as osp
from tqdm import tqdm
from glob import glob
from video_processor import Video_Processor
import params
# OpenFace parameters
save_size = 224
OpenFace_exe = params.OpenFace_exe
quiet = True
nomask = True
grey = False
tracked_vid = False
noface_save = False
# dataset
video_root = params.video_root
# main
video_processor = Video_Processor(save_size, nomask, grey, quiet, tracked_vid,
noface_save, OpenFace_exe)
video_ps = list(glob(osp.join(video_root, '*/*mp4')))
video_ps.extend(list(glob(osp.join(video_root, '*/*avi'))))
for video_p in tqdm(video_ps):
video_name = os.path.basename(video_p).split('.')[0]
opface_output_dir = os.path.join(os.path.dirname(video_p),
video_name + "_opface")
video_processor.process(video_p, opface_output_dir)
| Python | 31 | 26.806452 | 78 | /preprocess/openface/face_crop_align.py | 0.667053 | 0.661253 |
guanjz20/MM21_FME_solution | refs/heads/master | import torch.nn as nn
def init_weights(model):
for k, m in model.named_modules():
if isinstance(m, nn.Conv3d) or isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight,
mode='fan_out',
nonlinearity='relu')
# nn.init.xavier_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
| Python | 18 | 41.166668 | 76 | /model/utils.py | 0.520422 | 0.509881 |
guanjz20/MM21_FME_solution | refs/heads/master | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from genericpath import exists
import os
from typing import Final
import cv2
import sys
from matplotlib.pyplot import xcorr
from numpy.random import f, sample, shuffle
from torch.utils.data import dataset
from config import parser
if len(sys.argv) > 1:
# use shell args
args = parser.parse_args()
print('Use shell args.')
else:
# Debug
args_list = [
'--dataset',
'SAMM',
'--print-freq',
'1',
'--snap',
'debug',
'--data_option',
'wt_diff',
'--gpus',
'0',
'--batch_size',
'2',
'--input_size',
'128',
'--length',
'64',
'-L',
'12',
'--workers',
'0',
]
args = parser.parse_args(args_list)
# os setting
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
cv2.ocl.setUseOpenCL(False)
cv2.setNumThreads(0)
if args.gpus is not None:
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
import re
import logging
import time
import torch
import os.path as osp
import torch.nn as nn
import numpy as np
import pandas as pd
import torch.distributed as dist
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from datetime import datetime
from tqdm import tqdm
from pprint import pformat
from timm.utils import setup_default_logging, NativeScaler, reduce_tensor, distribute_bn
from timm.data.distributed_sampler import OrderedDistributedSampler
from contextlib import suppress
from model.network import Two_Stream_RNN_Cls, load_pretrained_model
from dataset.me_dataset import SAMMDataset, CASME_2Dataset
import utils
import trainer_cls as trainer
# torch.multiprocessing.set_start_method('spawn')
torch.backends.cudnn.benchmark = True
# check resume
RESUME = osp.exists(args.resume)
# check finetune
if len(args.finetune_list) > 0:
assert RESUME
FINETUNE = True
else:
FINETUNE = False
_logger = logging.getLogger('train')
# resume
if RESUME:
setattr(args, 'save_root', 'results/{}'.format(osp.basename(args.resume)))
else:
snapshot_name = '_'.join(
[args.snap, datetime.now().strftime("%Y%m%d-%H%M%S")])
if len(args.store_name) == 0:
args.store_name = snapshot_name
setattr(args, 'save_root', 'results/{}'.format(args.store_name))
# make dirs
if args.local_rank == 0:
utils.check_rootfolders(args)
else:
time.sleep(1)
# setup logging
setup_default_logging(
log_path=os.path.join(args.save_root, args.root_log, 'run.log'))
_logger.info("save experiment to :{}".format(args.save_root))
# save args
if args.local_rank == 0:
args_string = pformat(args.__dict__)
_logger.info(args_string)
# reset random
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
# if distributed
if args.distributed and 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.device = 'cuda'
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
args.device = 'cuda:%d' % args.local_rank
torch.cuda.set_device(args.local_rank)
dist.init_process_group(backend='nccl', init_method='env://')
args.world_size = dist.get_world_size()
args.rank = dist.get_rank()
_logger.info(
'Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
% (args.rank, args.world_size))
# else:
# _logger.info('Training with a single process on 1 GPUs.')
assert args.rank >= 0
utils.synchronize()
# loss_fn
criterion = utils.Focal_Loss(alpha=args.focal_alpha)
# leave one subject out cross validation
img_dirs = utils.get_img_dirs(args.dataset)
img_dirs_dict = utils.leave_one_out(
img_dirs, args.dataset) # key -> [train_set, val_set]
# finetuen and resume
if RESUME:
total_MNA = np.load(osp.join(args.resume, args.root_output,
'cross_validation_MNA_dict.npy'),
allow_pickle=True).item()
match_regions_record_all = np.load(osp.join(
args.resume, args.root_output, 'match_regions_record_all.npy'),
allow_pickle=True).item()
if not FINETUNE:
keys1 = list(total_MNA.keys())
# keys2 = list(match_regions_record_all.keys())
rm_key = keys1[-1] # after python 3.6, order is guaranteed
if args.delete_last:
# delete the last subject results
total_MNA, match_regions_record_all = utils.delete_records(
total_MNA, match_regions_record_all, rm_key)
if args.local_rank == 0:
_logger.info('resume from subject {} (include)'.format(rm_key))
elif args.local_rank == 0:
_logger.info('resume from subject {} (not include)'.format(rm_key))
else:
if args.local_rank == 0:
_logger.info('finetune subjects: [{}]'.format(','.join(
args.finetune_list)))
else:
total_MNA = {} # store all cross-validation results
match_regions_record_all = {}
utils.synchronize()
for vi, (val_id, [train_dirs, val_dirs]) in enumerate(img_dirs_dict.items()):
# leave {val_id} out...
# FINETUNE has higher priority than RESUME
if FINETUNE and (val_id not in args.finetune_list):
continue # skip subjects that do not need finetune
if RESUME and (not FINETUNE) and (val_id in total_MNA):
continue # skip from resume
if val_id in args.finetune_list:
# delete records
total_MNA, match_regions_record_all = utils.delete_records(
total_MNA, match_regions_record_all, val_id)
if args.data_option == 'diff':
inchannel = args.L
elif args.data_option == 'wt_diff':
inchannel = 4 * args.L
elif args.data_option == 'wt_dr':
inchannel = (
args.L + 1 - 11 +
1) * 2 * 4 # gauss kernel size = 11, *2 = dr1,dr2, *4 = 4 bands
# amp
amp_autocast = suppress # do nothing
loss_scaler = None
if args.amp:
amp_autocast = torch.cuda.amp.autocast
loss_scaler = NativeScaler()
if args.local_rank == 0:
_logger.info(
'Using native Torch AMP. Training in mixed precision.')
else:
if args.local_rank == 0:
_logger.info('AMP not enabled. Training in float32.')
# model
model = Two_Stream_RNN_Cls(mlp_hidden_units=args.hidden_units,
inchannel=inchannel,
outchannel=2)
# load pretrained
if osp.exists(args.load_pretrained):
model = load_pretrained_model(model, args.load_pretrained,
args.load_bn)
if args.local_rank == 0:
_logger.info('Load pretrained model from {}[load_bn: {}]'.format(
args.load_pretrained, args.load_bn))
# pytorch_total_params = sum(p.numel() for p in model.parameters()
# if p.requires_grad)
# print("Total Params: {}".format(pytorch_total_params))
model = model.cuda()
# setup synchronized BatchNorm for distributed training
if args.distributed:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
# if args.local_rank == 0:
# _logger.info(
# 'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '
# 'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.'
# )
# optimizer
if args.optim == 'SGD':
optimizer = torch.optim.SGD(
[p for p in model.parameters() if p.requires_grad],
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
elif args.optim == 'Adam':
optimizer = torch.optim.Adam(
[p for p in model.parameters() if p.requires_grad],
args.lr,
weight_decay=args.weight_decay)
else:
raise NotImplementedError
# setup distributed training
if args.distributed:
model = DistributedDataParallel(model,
device_ids=[args.local_rank],
find_unused_parameters=True)
else:
model = DataParallel(model).cuda()
# dataset
Dataset = SAMMDataset if args.dataset == 'SAMM' else CASME_2Dataset
def create_dataset():
train_dataset = Dataset(
mode='train',
img_dirs=train_dirs,
seq_len=args.length,
step=args.step,
# step=1000, # !!
time_len=args.L,
input_size=args.input_size,
data_aug=args.data_aug,
data_option=args.data_option)
val_dataset = Dataset(
mode='test',
img_dirs=val_dirs,
seq_len=args.length,
step=args.length, # assert no overlap
# step=1000, # !!
time_len=args.L,
input_size=args.input_size,
data_aug=False,
data_option=args.data_option)
return train_dataset, val_dataset
train_dataset, val_dataset = create_dataset()
if args.distributed:
val_sampler = OrderedDistributedSampler(val_dataset)
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset)
else:
val_sampler = None
train_sampler = None
train_loader = torch.utils.data.DataLoader(train_dataset,
shuffle=train_sampler is None,
sampler=train_sampler,
batch_size=args.batch_size,
drop_last=False,
num_workers=args.workers,
pin_memory=False)
val_loader = torch.utils.data.DataLoader(val_dataset,
batch_size=args.batch_size,
shuffle=False,
sampler=val_sampler,
num_workers=0,
pin_memory=False,
drop_last=False)
if args.local_rank == 0:
_logger.info('<' * 10 + ' {} '.format(val_id) + '<' * 10)
best_f_score = -1000.0
best_loss = 1000.0
val_accum_epochs = 0
for epoch in range(args.epochs):
if train_sampler is not None:
train_sampler.set_epoch(epoch)
utils.adjust_learning_rate(optimizer, epoch, args.lr,
args.weight_decay, args.lr_steps,
args.lr_decay_factor)
trainer.train(train_loader, model, criterion, optimizer, epoch,
_logger, args, amp_autocast, loss_scaler)
utils.synchronize()
# bn syn
if args.distributed:
if args.local_rank == 0:
_logger.info("Distributing BatchNorm running means and vars")
distribute_bn(model, args.world_size,
True) # true for reduce, false for broadcast
# logging
if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:
loss_val, pred_and_gt = trainer.validate(val_loader, model,
criterion, _logger, args,
amp_autocast)
# distributed synchronize
pred_and_gt = utils.synchronize_pred_and_gt(
pred_and_gt, epoch, args)
# eval
if args.local_rank == 0:
precision, recall, f_score, MNA, match_regions_record = utils.evaluate_bi_labels(
pred_and_gt, val_id, epoch, args)
else:
f_score = -10.0
MNA = (0, 0, 0)
# precision, recall, f_score, MNA, match_regions_record = utils.evaluate_bi_labels(
# pred_and_gt, val_id, epoch, args)
utils.synchronize()
# synchronize
f_score = utils.synchronize_f_score(f_score, args)
_logger.info('f_score of processor {}: {:.4f}'.format(
args.local_rank, f_score))
MNA = utils.synchronize_list(MNA, args)
_logger.info('MNA of processor {}: {}'.format(
args.local_rank, MNA))
is_equal_score = f_score == best_f_score
is_best_loss = loss_val < best_loss
best_loss = min(loss_val, best_loss)
is_best_score = f_score > best_f_score
best_f_score = max(best_f_score, f_score)
# save checkpoint
if args.local_rank == 0:
_logger.info(
'Test[{}]: loss_val: {:.4f} (best: {:.4f}), f-score: {:.4f} (best: {:.4f})'
.format(epoch, loss_val, best_loss, f_score, best_f_score))
utils.save_checkpoint(
{
'epoch': epoch + 1,
'state_dict': model.state_dict(),
},
is_best_score,
args.save_root,
args.root_model,
filename=val_id)
utils.synchronize()
if is_best_score or (is_equal_score and
MNA[1] < total_MNA.get(val_id, [0, 0, 0])[1]):
val_accum_epochs = 0
total_MNA.update(
{val_id:
MNA}) # processor 0 need this record for branch selection
if args.local_rank == 0:
match_regions_record_all.update(
match_regions_record
) # only processor 0 need this record
out_dir = osp.join(args.save_root, args.root_output,
val_id)
os.makedirs(out_dir, exist_ok=True)
np.save(osp.join(out_dir, 'match_regions_record_best.npy'),
match_regions_record)
# all
np.save(
osp.join(args.save_root, args.root_output,
'cross_validation_MNA_dict.npy'), total_MNA)
np.save(
osp.join(args.save_root, args.root_output,
'match_regions_record_all.npy'),
match_regions_record_all)
precision, recall, f_score = utils.calculate_metric_from_dict_MNA(
total_MNA)
_logger.info(
'Test[all] Avg f-score now: {:.4f}'.format(f_score))
utils.synchronize()
else:
val_accum_epochs += 1
if val_accum_epochs >= args.early_stop:
_logger.info(
"validation ccc did not improve over {} epochs, stop processor {}"
.format(args.early_stop, args.local_rank))
break
if args.local_rank == 0:
precision_all, recall_all, f_score_all = utils.calculate_metric_from_dict_MNA(
total_MNA)
_logger.critical(
'[{}][{}]/[{}] f_score: {:.4f}, precision_all: {:.4f}, recall_all: {:.4f}, f_score_all: {:.4f}'
.format(val_id, vi + 1, len(img_dirs_dict), best_f_score,
precision_all, recall_all, f_score_all))
# store results
if args.local_rank == 0:
np.save(
osp.join(args.save_root, args.root_output,
'cross_validation_MNA_dict.npy'), total_MNA)
np.save(
osp.join(args.save_root, args.root_output,
'match_regions_record_all.npy'), match_regions_record_all)
_logger.info('ALL DONE')
exit()
| Python | 437 | 36.016018 | 107 | /main_cls.py | 0.542718 | 0.53499 |
guanjz20/MM21_FME_solution | refs/heads/master | from albumentations.augmentations.transforms import GaussNoise
import cv2
import os
import numpy as np
import os.path as osp
import albumentations as alb
# from torch._C import Ident
# from torch.nn.modules.linear import Identity
class IsotropicResize(alb.DualTransform):
    """Albumentations dual transform: resize isotropically so the longer image
    side equals ``max_side`` (aspect ratio preserved).

    Separate interpolation methods are used for shrinking vs. enlarging.
    """

    def __init__(self,
                 max_side,
                 interpolation_down=cv2.INTER_AREA,
                 interpolation_up=cv2.INTER_CUBIC,
                 always_apply=False,
                 p=1):
        super().__init__(always_apply, p)
        self.max_side = max_side
        self.interpolation_down = interpolation_down
        self.interpolation_up = interpolation_up

    def apply(self,
              img,
              interpolation_down=cv2.INTER_AREA,
              interpolation_up=cv2.INTER_CUBIC,
              **params):
        # Delegate the actual resizing to the module-level helper.
        return isotropically_resize_image(img,
                                          size=self.max_side,
                                          interpolation_down=interpolation_down,
                                          interpolation_up=interpolation_up)

    def apply_to_mask(self, img, **params):
        # Masks use nearest-neighbour interpolation in both directions so that
        # label values are never blended.
        return self.apply(img,
                          interpolation_down=cv2.INTER_NEAREST,
                          interpolation_up=cv2.INTER_NEAREST,
                          **params)

    def get_transform_init_args_names(self):
        # Names reported back to albumentations for serialization/repr.
        return ("max_side", "interpolation_down", "interpolation_up")
class Identity():
    """No-op callable: returns its single argument unchanged.

    Used as the base for test-time transforms that perform no augmentation.
    """

    def __init__(self):
        pass

    def __call__(self, value):
        return value
class GroupTrainTransform():
    """Group-level training augmentation: applies a randomly gated set of
    albumentations ops to every image in a list.

    NOTE(review): each op is applied to the images one by one with
    independent per-image randomness, so spatial augs (flip, shift/rotate)
    may differ between images of the same group — confirm this is intended.
    """

    def __init__(self):
        # BUG FIX: the original assignments ended with stray trailing commas,
        # which wrapped every augmentation in a 1-tuple and made the stored
        # "transforms" uncallable in __call__.
        self.ImageCompression = alb.ImageCompression(quality_lower=60,
                                                     quality_upper=100,
                                                     p=1)
        self.GaussNoise = alb.GaussNoise(p=1)
        self.GaussianBlur = alb.GaussianBlur(blur_limit=(3, 5), p=1)
        self.HorizontalFlip = alb.HorizontalFlip(p=1)
        self.LightChange = alb.OneOf([
            alb.RandomBrightnessContrast(),
            alb.FancyPCA(),
            alb.HueSaturationValue()
        ], p=1)
        self.ShiftRotate = alb.ShiftScaleRotate(
            shift_limit=0.1,
            scale_limit=0.2,
            rotate_limit=10,
            border_mode=cv2.BORDER_CONSTANT,
            p=1)

    def _apply_aug(self, imgs, aug_method):
        """Apply `aug_method` to every image in `imgs` in place; return imgs."""
        # BUG FIX: the original definition omitted `self`, so instance calls
        # bound `self` to the `imgs` parameter and shifted every argument.
        for i, img in enumerate(imgs):
            imgs[i] = aug_method(image=img)['image']
        return imgs

    def __call__(self, imgs):
        # Each augmentation is constructed with p=1, so the gates below are
        # the only randomness deciding whether an op runs for this group.
        # img compress
        if np.random.random() < 0.3:
            imgs = self._apply_aug(imgs, self.ImageCompression)
        # gauss noise
        if np.random.random() < 0.1:
            imgs = self._apply_aug(imgs, self.GaussNoise)
        # gauss blur
        if np.random.random() < 0.05:
            imgs = self._apply_aug(imgs, self.GaussianBlur)
        # flip
        if np.random.random() < 0.5:
            imgs = self._apply_aug(imgs, self.HorizontalFlip)
        # light
        if np.random.random() < 0.5:
            imgs = self._apply_aug(imgs, self.LightChange)
        # shift rotate
        if np.random.random() < 0.5:
            imgs = self._apply_aug(imgs, self.ShiftRotate)
        return imgs
class GroupTestTransform(Identity):
    """Test-time group transform: a no-op (inherits Identity's pass-through
    __call__); no augmentation is applied at evaluation time.
    """

    def __init__(self, *args, **kwargs):
        # Forward any constructor arguments to Identity (which takes none),
        # keeping the signature flexible at the factory call site.
        super().__init__(*args, **kwargs)
def get_group_transform(mode):
    """Factory for the group-level transform of a dataset split.

    'train' -> random group augmentations, 'test' -> identity pass-through.
    Raises NotImplementedError for any other mode.
    """
    factories = {
        'train': GroupTrainTransform,
        'test': GroupTestTransform,
    }
    if mode not in factories:
        raise NotImplementedError
    return factories[mode]()
def isotropically_resize_image(img,
                               size,
                               interpolation_down=cv2.INTER_AREA,
                               interpolation_up=cv2.INTER_CUBIC):
    """Resize `img` so its longer side equals `size`, keeping aspect ratio.

    The longer side is set to exactly `size`; the shorter side is scaled
    (and truncated to int). Up- and down-scaling use different
    interpolation methods. Returns the input unchanged if already at size.
    """
    h, w = img.shape[:2]
    if max(w, h) == size:
        return img  # already at target scale, avoid a pointless resize
    # Scale factor relative to the longer side.
    scale = size / w if w > h else size / h
    if w > h:
        new_w, new_h = size, int(h * scale)
    else:
        new_w, new_h = int(w * scale), size
    interp = interpolation_up if scale > 1 else interpolation_down
    return cv2.resize(img, (new_w, new_h), interpolation=interp)
def get_transform(mode, size):
    """Return the per-image albumentations pipeline for `mode` at `size`.

    Raises NotImplementedError for any mode other than 'train' / 'test'.
    """
    if mode == 'train':
        return get_train_transform(size)
    if mode == 'test':
        return get_test_transform(size)
    raise NotImplementedError
def get_test_transform(size):
    """Test-time pipeline: isotropic resize to `size` on the long side,
    then constant-border padding up to a `size` x `size` square."""
    resize = IsotropicResize(max_side=size)
    pad = alb.PadIfNeeded(min_height=size,
                          min_width=size,
                          border_mode=cv2.BORDER_CONSTANT)
    return alb.Compose([resize, pad])
def get_train_transform(size):
    """Training pipeline: random horizontal flip, isotropic resize with a
    randomly chosen interpolation pair, then constant-border padding up to
    a `size` x `size` square.

    Heavier augmentations previously tried here (noise, blur, color jitter,
    grayscale, shift/scale/rotate) are intentionally disabled.
    """
    # One of three interpolation pairings, picked per sample.
    resize_variants = alb.OneOf([
        IsotropicResize(max_side=size,
                        interpolation_down=cv2.INTER_AREA,
                        interpolation_up=cv2.INTER_CUBIC),
        IsotropicResize(max_side=size,
                        interpolation_down=cv2.INTER_AREA,
                        interpolation_up=cv2.INTER_LINEAR),
        IsotropicResize(max_side=size,
                        interpolation_down=cv2.INTER_LINEAR,
                        interpolation_up=cv2.INTER_LINEAR),
    ], p=1)
    return alb.Compose([
        alb.HorizontalFlip(),
        resize_variants,
        alb.PadIfNeeded(min_height=size,
                        min_width=size,
                        border_mode=cv2.BORDER_CONSTANT),
    ])
def scan_jpg_from_img_dir(img_dir):
    """Return full paths of all ``*.jpg`` files in `img_dir`, sorted by the
    integer frame index encoded at the end of the file name
    (``..._<idx>.jpg`` or ``<idx>.jpg``).

    BUG FIX: the original applied the numeric sort key to *every* directory
    entry before filtering, so any non-jpg file (e.g. ``readme.txt``)
    crashed the ``int()`` conversion with ValueError. Filter first.
    """
    def frame_index(name):
        # "xxx_12.jpg" -> 12 ; also handles "12.jpg"
        return int(name.split('.')[0].split('_')[-1])

    jpg_names = [n for n in os.listdir(img_dir) if '.jpg' in n]
    img_ps = [osp.join(img_dir, n) for n in sorted(jpg_names, key=frame_index)]
    return img_ps
gowtham59/fgh | refs/heads/master | f12,f22=input().split()
# `f22` arrives as a string from input() (read on the line above);
# convert it to the repeat count.
f22=int(f22)
# Print the first input token `f12` once per line, `f22` times.
for y in range(f22):
    print(f12)
| Python | 4 | 16.75 | 23 | /g.py | 0.661972 | 0.492958 |
wendeehsu/MangoClassification | refs/heads/master | """# Load libraries"""
# Fine-tunes an ImageNet-pretrained InceptionV3 to classify mango grades
# (classes A/B/C) from images listed in train.csv.
import os, shutil
import matplotlib.pyplot as plt
import numpy as np
import random
import pandas as pd
from keras.applications.inception_v3 import InceptionV3
from keras.layers import Activation, Dense, GlobalAveragePooling2D, Dropout
from keras.layers.core import Flatten
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
import tensorflow as tf

"""# Check files"""

# Training images and the three mango-grade class labels.
path = "./C1-P1_Train/"
class_names = ["A","B","C"]
classNum = len(class_names)

"""# Load Data"""

# train.csv is headerless: column 0 = image file name, column 1 = grade label.
traindf=pd.read_csv("train.csv", header=None)
traindf = traindf.rename(columns={0: "name", 1: "class"})
print(traindf.head())

# Model input resolution and mini-batch size.
target_size = (224,224)
batch_size = 20

# ImageDataGenerator() performs on-the-fly augmentation (shifts, shear,
# zoom, brightness) and reserves 20% of the rows for validation.
datagen = ImageDataGenerator(
    rescale = 1./255,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    brightness_range=[0.2,1.0],
    fill_mode='nearest',
    validation_split=0.2)

# Read the data in batches, split into training / validation subsets.
train_batches = datagen.flow_from_dataframe(
    dataframe=traindf,
    directory=path,
    x_col="name",
    y_col="class",
    target_size = target_size,
    batch_size = batch_size,
    subset='training')

valid_batches = datagen.flow_from_dataframe(
    dataframe=traindf,
    directory=path,
    x_col="name",
    y_col="class",
    target_size = target_size,
    batch_size = batch_size,
    subset='validation')

"""# Build model"""

# ImageNet-pretrained InceptionV3 backbone without its classification head,
# topped with global average pooling, dropout and a softmax classifier.
net = InceptionV3(include_top=False, weights="imagenet")
x = net.output
x = GlobalAveragePooling2D()(x)
x = Dropout(0.5)(x)
output_layer = Dense(classNum, activation='softmax')(x)

FREEZE_LAYERS = 2
# Configure which layers are frozen and which are trained.
net_final = Model(inputs=net.input, outputs=output_layer)
for layer in net_final.layers[:FREEZE_LAYERS]:
    layer.trainable = False
for layer in net_final.layers[FREEZE_LAYERS:]:
    layer.trainable = True

# Fine-tune with the Adam optimizer at a low learning rate.
net_final.compile(optimizer=Adam(lr=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])

history = net_final.fit_generator(train_batches,
                                  steps_per_epoch = train_batches.samples // batch_size,
                                  validation_data=valid_batches,
                                  validation_steps = valid_batches.samples // batch_size,
                                  epochs=30)
net_final.save("models/mango_Incept.h5")

# Evaluate on the held-out validation split.
STEP_SIZE_VALID = valid_batches.n // valid_batches.batch_size
result = net_final.evaluate_generator(generator=valid_batches, steps=STEP_SIZE_VALID, verbose=1)
print("result = ", result)

# plot metrics
# BUG FIX: savefig() must run BEFORE show() — show() flushes and releases
# the active figure in non-interactive sessions, so the original order
# (show first) wrote an empty accuracy.jpg.
plt.plot(history.history['accuracy'])
plt.savefig('accuracy.jpg')
plt.show()
| Python | 94 | 29.223404 | 97 | /train_incept.py | 0.679099 | 0.662913 |
wendeehsu/MangoClassification | refs/heads/master | import os, shutil
import random
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.python.keras.models import load_model
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import classification_report, confusion_matrix
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
path = "/C1-P1_Dev"
class_names = ["A","B","C"]
dic = {}
for className in class_names:
dir = path+"/"+className
files = os.listdir(dir)
imageNum = len(files)
randomNums = random.sample(range(imageNum), imageNum)
dic[className] = imageNum
plt.bar(range(len(dic)), list(dic.values()), align='center')
plt.xticks(range(len(dic)), list(dic.keys()))
print(dic)
plt.show()
target_size = (224,224)
batch_size = 1
#ImageDataGenerator() 可以做一些影像處理的動作
datagen = ImageDataGenerator(rescale = 1./255,)
#以 batch 的方式讀取資料
predict_batches = datagen.flow_from_directory(
path,
shuffle=False,
target_size = target_size,
batch_size = batch_size,
classes = class_names)
resnet = load_model("models/mango_resnet152.h5")
# print(resnet.summary())
filenames = predict_batches.filenames
nb_samples = len(filenames)
predict = resnet.predict(predict_batches, steps = nb_samples, verbose = 1)
y_pred = np.argmax(predict, axis=1)
print('confusion matrix')
print(confusion_matrix(predict_batches.classes, y_pred))
print(classification_report(predict_batches.classes, y_pred, target_names=class_names))
| Python | 54 | 30.037037 | 87 | /test.py | 0.741647 | 0.729117 |
wendeehsu/MangoClassification | refs/heads/master | """# Load libraries"""
# Fine-tunes an ImageNet-pretrained ResNet152 to classify mango grades
# (classes A/B/C) from images listed in train.csv.
import os, shutil
import matplotlib.pyplot as plt
import numpy as np
import random
import pandas as pd
from keras.applications.resnet import ResNet152
from keras.layers.core import Dense, Flatten
from keras.layers import Activation,Dropout
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator

"""# Check files"""

# Training images and the three mango-grade class labels.
path = "./C1-P1_Train/"
class_names = ["A","B","C"]
classNum = len(class_names)

"""# Load Data"""

# train.csv is headerless: column 0 = image file name, column 1 = grade label.
traindf=pd.read_csv("train.csv", header=None)
traindf = traindf.rename(columns={0: "name", 1: "class"})
print(traindf.head())

# Model input resolution and mini-batch size.
target_size = (224,224)
batch_size = 20

# ImageDataGenerator() performs on-the-fly augmentation (shifts, shear,
# zoom, brightness) and reserves 20% of the rows for validation.
datagen = ImageDataGenerator(
    rescale = 1./255,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    brightness_range=[0.2,1.0],
    fill_mode='nearest',
    validation_split=0.2)

# Read the data in batches, split into training / validation subsets.
train_batches = datagen.flow_from_dataframe(
    dataframe=traindf,
    directory=path,
    x_col="name",
    y_col="class",
    target_size = target_size,
    batch_size = batch_size,
    subset='training')

valid_batches = datagen.flow_from_dataframe(
    dataframe=traindf,
    directory=path,
    x_col="name",
    y_col="class",
    target_size = target_size,
    batch_size = batch_size,
    subset='validation')

"""# Build model"""

# Number of leading layers to freeze during fine-tuning.
FREEZE_LAYERS = 2
# ImageNet-pretrained ResNet152 backbone without its classification head.
# NOTE(review): input_shape's channel count is classNum (3) — this only
# works because there happen to be 3 classes; the RGB channel count should
# conceptually be a literal 3. Confirm before changing class_names.
net = ResNet152(include_top=False,
                weights="imagenet",
                input_tensor=None,
                input_shape=(target_size[0],target_size[1],classNum),
                classes=classNum)
x = net.output
x = Flatten()(x)
# Optional extra Dense layer (disabled).
# x = Dense(256, activation='softmax', name='output2_layer')(x)
# Dropout for regularisation, then a softmax head over the classes.
x = Dropout(0.5)(x)
output_layer = Dense(classNum, activation='softmax', name='softmax')(x)

# Configure which layers are frozen and which are trained.
net_final = Model(inputs=net.input, outputs=output_layer)
for layer in net_final.layers[:FREEZE_LAYERS]:
    layer.trainable = False
for layer in net_final.layers[FREEZE_LAYERS:]:
    layer.trainable = True

# Fine-tune with the Adam optimizer at a low learning rate.
net_final.compile(optimizer=Adam(lr=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])

# Print the full network structure.
# print(net_final.summary())

"""# Train"""

# Train the model and save the fine-tuned weights.
history = net_final.fit(train_batches,
                        steps_per_epoch = train_batches.samples // batch_size,
                        validation_data = valid_batches,
                        validation_steps = valid_batches.samples // batch_size,
                        epochs = 30)
net_final.save("models/mango_resnet152.h5")
| Python | 99 | 26.464647 | 97 | /train.py | 0.651838 | 0.632353 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.