id
int64
0
190k
prompt
stringlengths
21
13.4M
docstring
stringlengths
1
12k
21,927
from .validate import Validator, validated from collections import ChainMap class Structure(metaclass=StructureMeta): _fields = () _types = () def __setattr__(self, name, value): if name.startswith('_') or name in self._fields: super().__setattr__(name, value) else: raise AttributeError('No attribute %s' % name) def __repr__(self): return '%s(%s)' % (type(self).__name__, ', '.join(repr(getattr(self, name)) for name in self._fields)) def __iter__(self): for name in self._fields: yield getattr(self, name) def __eq__(self, other): return isinstance(other, type(self)) and tuple(self) == tuple(other) def from_row(cls, row): rowdata = [ func(val) for func, val in zip(cls._types, row) ] return cls(*rowdata) def create_init(cls): ''' Create an __init__ method from _fields ''' args = ','.join(cls._fields) code = f'def __init__(self, {args}):\n' for name in cls._fields: code += f' self.{name} = {name}\n' locs = { } exec(code, locs) cls.__init__ = locs['__init__'] def __init_subclass__(cls): # Apply the validated decorator to subclasses validate_attributes(cls) def typed_structure(clsname, **validators): cls = type(clsname, (Structure,), validators) return cls
null
21,928
from abc import ABC, abstractmethod class TableFormatter(ABC): def headings(self, headers): pass def row(self, rowdata): pass def print_table(records, fields, formatter): if not isinstance(formatter, TableFormatter): raise RuntimeError('Expected a TableFormatter') formatter.headings(fields) for r in records: rowdata = [getattr(r, fieldname) for fieldname in fields] formatter.row(rowdata)
null
21,929
from inspect import signature from functools import wraps def enforce(**annotations): retcheck = annotations.pop('return_', None) def decorate(func): sig = signature(func) @wraps(func) def wrapper(*args, **kwargs): bound = sig.bind(*args, **kwargs) errors = [] # Enforce argument checks for name, validator in annotations.items(): try: validator.check(bound.arguments[name]) except Exception as e: errors.append(f' {name}: {e}') if errors: raise TypeError('Bad Arguments\n' + '\n'.join(errors)) result = func(*args, **kwargs) if retcheck: try: retcheck.check(result) except Exception as e: raise TypeError(f'Bad return: {e}') from None return result return wrapper return decorate
null
21,930
from inspect import signature from functools import wraps def add(x:Integer, y:Integer) -> Integer: return x + y
null
21,931
from inspect import signature from functools import wraps def div(x:Integer, y:Integer) -> Integer: return x / y
null
21,932
from inspect import signature from functools import wraps def sub(x, y): return x - y
null
21,933
import csv import logging def csv_as_dicts(lines, types, *, headers=None): return convert_csv(lines, lambda headers, row: { name: func(val) for name, func, val in zip(headers, types, row) }) The provided code snippet includes necessary dependencies for implementing the `read_csv_as_dicts` function. Write a Python function `def read_csv_as_dicts(filename, types, *, headers=None)` to solve the following problem: Read CSV data into a list of dictionaries with optional type conversion Here is the function: def read_csv_as_dicts(filename, types, *, headers=None): ''' Read CSV data into a list of dictionaries with optional type conversion ''' with open(filename) as file: return csv_as_dicts(file, types, headers=headers)
Read CSV data into a list of dictionaries with optional type conversion
21,934
import csv import logging def csv_as_instances(lines, cls, *, headers=None): return convert_csv(lines, lambda headers, row: cls.from_row(row)) The provided code snippet includes necessary dependencies for implementing the `read_csv_as_instances` function. Write a Python function `def read_csv_as_instances(filename, cls, *, headers=None)` to solve the following problem: Read CSV data into a list of instances Here is the function: def read_csv_as_instances(filename, cls, *, headers=None): ''' Read CSV data into a list of instances ''' with open(filename) as file: return csv_as_instances(file, cls, headers=headers)
Read CSV data into a list of instances
21,935
from validate import Validator, validated from collections import ChainMap class Validator: def check(cls, value): return value def validated(func): sig = signature(func) # Gather the function annotations annotations = { name:val for name, val in func.__annotations__.items() if isvalidator(val) } # Get the return annotation (if any) retcheck = annotations.pop('return', None) def wrapper(*args, **kwargs): bound = sig.bind(*args, **kwargs) errors = [] # Enforce argument checks for name, validator in annotations.items(): try: validator.check(bound.arguments[name]) except Exception as e: errors.append(f' {name}: {e}') if errors: raise TypeError('Bad Arguments\n' + '\n'.join(errors)) result = func(*args, **kwargs) # Enforce return check (if any) if retcheck: try: retcheck.check(result) except Exception as e: raise TypeError(f'Bad return: {e}') from None return result return wrapper The provided code snippet includes necessary dependencies for implementing the `validate_attributes` function. Write a Python function `def validate_attributes(cls)` to solve the following problem: Class decorator that scans a class definition for Validators and builds a _fields variable that captures their definition order. Here is the function: def validate_attributes(cls): ''' Class decorator that scans a class definition for Validators and builds a _fields variable that captures their definition order. ''' validators = [] for name, val in vars(cls).items(): if isinstance(val, Validator): validators.append(val) # Apply validated decorator to any callable with annotations elif callable(val) and val.__annotations__: setattr(cls, name, validated(val)) # Collect all of the field names cls._fields = tuple([v.name for v in validators]) # Collect type conversions. The lambda x:x is an identity # function that's used in case no expected_type is found. 
cls._types = tuple([ getattr(v, 'expected_type', lambda x: x) for v in validators ]) # Create the __init__ method if cls._fields: cls.create_init() return cls
Class decorator that scans a class definition for Validators and builds a _fields variable that captures their definition order.
21,936
from validate import Validator, validated from collections import ChainMap class Structure(metaclass=StructureMeta): _fields = () _types = () def __setattr__(self, name, value): if name.startswith('_') or name in self._fields: super().__setattr__(name, value) else: raise AttributeError('No attribute %s' % name) def __repr__(self): return '%s(%s)' % (type(self).__name__, ', '.join(repr(getattr(self, name)) for name in self._fields)) def __iter__(self): for name in self._fields: yield getattr(self, name) def __eq__(self, other): return isinstance(other, type(self)) and tuple(self) == tuple(other) def from_row(cls, row): rowdata = [ func(val) for func, val in zip(cls._types, row) ] return cls(*rowdata) def create_init(cls): ''' Create an __init__ method from _fields ''' args = ','.join(cls._fields) code = f'def __init__(self, {args}):\n' for name in cls._fields: code += f' self.{name} = {name}\n' locs = { } exec(code, locs) cls.__init__ = locs['__init__'] def __init_subclass__(cls): # Apply the validated decorator to subclasses validate_attributes(cls) def typed_structure(clsname, **validators): cls = type(clsname, (Structure,), validators) return cls
null
21,937
from inspect import signature from functools import wraps def isvalidator(item): return isinstance(item, type) and issubclass(item, Validator) def validated(func): sig = signature(func) # Gather the function annotations annotations = { name:val for name, val in func.__annotations__.items() if isvalidator(val) } # Get the return annotation (if any) retcheck = annotations.pop('return', None) @wraps(func) def wrapper(*args, **kwargs): bound = sig.bind(*args, **kwargs) errors = [] # Enforce argument checks for name, validator in annotations.items(): try: validator.check(bound.arguments[name]) except Exception as e: errors.append(f' {name}: {e}') if errors: raise TypeError('Bad Arguments\n' + '\n'.join(errors)) result = func(*args, **kwargs) # Enforce return check (if any) if retcheck: try: retcheck.check(result) except Exception as e: raise TypeError(f'Bad return: {e}') from None return result return wrapper
null
21,944
import os import time The provided code snippet includes necessary dependencies for implementing the `follow` function. Write a Python function `def follow(filename)` to solve the following problem: Generator that produces a sequence of lines being written at the end of a file. Here is the function: def follow(filename): ''' Generator that produces a sequence of lines being written at the end of a file. ''' with open(filename,'r') as f: f.seek(0,os.SEEK_END) while True: line = f.readline() if line == '': time.sleep(0.1) # Sleep briefly to avoid busy wait continue yield line
Generator that produces a sequence of lines being written at the end of a file.
21,945
import csv def csv_as_dicts(lines, types, *, headers=None): return convert_csv(lines, lambda headers, row: { name: func(val) for name, func, val in zip(headers, types, row) }) The provided code snippet includes necessary dependencies for implementing the `read_csv_as_dicts` function. Write a Python function `def read_csv_as_dicts(filename, types, *, headers=None)` to solve the following problem: Read CSV data into a list of dictionaries with optional type conversion Here is the function: def read_csv_as_dicts(filename, types, *, headers=None): ''' Read CSV data into a list of dictionaries with optional type conversion ''' with open(filename) as file: return csv_as_dicts(file, types, headers=headers)
Read CSV data into a list of dictionaries with optional type conversion
21,946
import csv def csv_as_instances(lines, cls, *, headers=None): return convert_csv(lines, lambda headers, row: cls.from_row(row)) The provided code snippet includes necessary dependencies for implementing the `read_csv_as_instances` function. Write a Python function `def read_csv_as_instances(filename, cls, *, headers=None)` to solve the following problem: Read CSV data into a list of instances Here is the function: def read_csv_as_instances(filename, cls, *, headers=None): ''' Read CSV data into a list of instances ''' with open(filename) as file: return csv_as_instances(file, cls, headers=headers)
Read CSV data into a list of instances
21,947
import csv The provided code snippet includes necessary dependencies for implementing the `read_rides_as_tuples` function. Write a Python function `def read_rides_as_tuples(filename)` to solve the following problem: Read the bus ride data as a list of tuples Here is the function: def read_rides_as_tuples(filename): ''' Read the bus ride data as a list of tuples ''' records = [] with open(filename) as f: rows = csv.reader(f) headings = next(rows) # Skip headers for row in rows: route = row[0] date = row[1] daytype = row[2] rides = int(row[3]) record = (route, date, daytype, rides) records.append(record) return records
Read the bus ride data as a list of tuples
21,948
import csv The provided code snippet includes necessary dependencies for implementing the `read_rides_as_dicts` function. Write a Python function `def read_rides_as_dicts(filename)` to solve the following problem: Read the bus ride data as a list of dicts Here is the function: def read_rides_as_dicts(filename): ''' Read the bus ride data as a list of dicts ''' records = [] with open(filename) as f: rows = csv.reader(f) headings = next(rows) # Skip headers for row in rows: route = row[0] date = row[1] daytype = row[2] rides = int(row[3]) record = { 'route': route, 'date': date, 'daytype': daytype, 'rides' : rides } records.append(record) return records
Read the bus ride data as a list of dicts
21,949
import csv class Row: # Uncomment to see effect of slots # __slots__ = ('route', 'date', 'daytype', 'rides') def __init__(self, route, date, daytype, rides): self.route = route self.date = date self.daytype = daytype self.rides = rides The provided code snippet includes necessary dependencies for implementing the `read_rides_as_instances` function. Write a Python function `def read_rides_as_instances(filename)` to solve the following problem: Read the bus ride data as a list of instances Here is the function: def read_rides_as_instances(filename): ''' Read the bus ride data as a list of instances ''' records = [] with open(filename) as f: rows = csv.reader(f) headings = next(rows) # Skip headers for row in rows: route = row[0] date = row[1] daytype = row[2] rides = int(row[3]) record = Row(route, date, daytype, rides) records.append(record) return records
Read the bus ride data as a list of instances
21,950
def portfolio_cost(filename): total_cost = 0.0 with open(filename) as f: for line in f: fields = line.split() try: nshares = int(fields[1]) price = float(fields[2]) total_cost = total_cost + nshares * price # This catches errors in int() and float() conversions above except ValueError as e: print("Couldn't parse:", line) print("Reason:", e) return total_cost
null
21,953
from abc import ABC, abstractmethod class TableFormatter(ABC): def headings(self, headers): def row(self, rowdata): from .formats.text import TextTableFormatter from .formats.csv import CSVTableFormatter from .formats.html import HTMLTableFormatter def print_table(records, fields, formatter): if not isinstance(formatter, TableFormatter): raise RuntimeError('Expected a TableFormatter') formatter.headings(fields) for r in records: rowdata = [getattr(r, fieldname) for fieldname in fields] formatter.row(rowdata)
null
21,954
from abc import ABC, abstractmethod from .formats.text import TextTableFormatter from .formats.csv import CSVTableFormatter from .formats.html import HTMLTableFormatter class ColumnFormatMixin: def row(self, rowdata): class UpperHeadersMixin: def headings(self, headers): class TextTableFormatter(TableFormatter): def headings(self, headers): def row(self, rowdata): class CSVTableFormatter(TableFormatter): def headings(self, headers): def row(self, rowdata): class HTMLTableFormatter(TableFormatter): def headings(self, headers): def row(self, rowdata): def create_formatter(name, column_formats=None, upper_headers=False): if name == 'text': formatter_cls = TextTableFormatter elif name == 'csv': formatter_cls = CSVTableFormatter elif name == 'html': formatter_cls = HTMLTableFormatter else: raise RuntimeError('Unknown format %s' % name) if column_formats: class formatter_cls(ColumnFormatMixin, formatter_cls): formats = column_formats if upper_headers: class formatter_cls(UpperHeadersMixin, formatter_cls): pass return formatter_cls()
null
21,962
from validate import Validator, validated from collections import ChainMap class Structure(metaclass=StructureMeta): _fields = () _types = () def __setattr__(self, name, value): if name.startswith('_') or name in self._fields: super().__setattr__(name, value) else: raise AttributeError('No attribute %s' % name) def __repr__(self): return '%s(%s)' % (type(self).__name__, ', '.join(repr(getattr(self, name)) for name in self._fields)) def from_row(cls, row): rowdata = [ func(val) for func, val in zip(cls._types, row) ] return cls(*rowdata) def create_init(cls): ''' Create an __init__ method from _fields ''' args = ','.join(cls._fields) code = f'def __init__(self, {args}):\n' for name in cls._fields: code += f' self.{name} = {name}\n' locs = { } exec(code, locs) cls.__init__ = locs['__init__'] def __init_subclass__(cls): # Apply the validated decorator to subclasses validate_attributes(cls) def typed_structure(clsname, **validators): cls = type(clsname, (Structure,), validators) return cls
null
21,964
from abc import ABC, abstractmethod class TextTableFormatter(TableFormatter): def headings(self, headers): print(' '.join('%10s' % h for h in headers)) print(('-'*10 + ' ')*len(headers)) def row(self, rowdata): print(' '.join('%10s' % d for d in rowdata)) class CSVTableFormatter(TableFormatter): def headings(self, headers): print(','.join(headers)) def row(self, rowdata): print(','.join(str(d) for d in rowdata)) class HTMLTableFormatter(TableFormatter): def headings(self, headers): print('<tr>', end=' ') for h in headers: print('<th>%s</th>' % h, end=' ') print('</tr>') def row(self, rowdata): print('<tr>', end=' ') for d in rowdata: print('<td>%s</td>' % d, end=' ') print('</tr>') class ColumnFormatMixin: formats = [] def row(self, rowdata): rowdata = [ (fmt % item) for fmt, item in zip(self.formats, rowdata)] super().row(rowdata) class UpperHeadersMixin: def headings(self, headers): super().headings([h.upper() for h in headers]) def create_formatter(name, column_formats=None, upper_headers=False): if name == 'text': formatter_cls = TextTableFormatter elif name == 'csv': formatter_cls = CSVTableFormatter elif name == 'html': formatter_cls = HTMLTableFormatter else: raise RuntimeError('Unknown format %s' % name) if column_formats: class formatter_cls(ColumnFormatMixin, formatter_cls): formats = column_formats if upper_headers: class formatter_cls(UpperHeadersMixin, formatter_cls): pass return formatter_cls()
null
21,972
from abc import ABC, abstractmethod class TableFormatter(ABC): def headings(self, headers): pass def row(self, rowdata): pass def print_table(records, fields, formatter): if not isinstance(formatter, TableFormatter): raise TypeError('Expected a TableFormatter') formatter.headings(fields) for r in records: rowdata = [getattr(r, fieldname) for fieldname in fields] formatter.row(rowdata)
null
21,973
from abc import ABC, abstractmethod class TextTableFormatter(TableFormatter): def headings(self, headers): def row(self, rowdata): class CSVTableFormatter(TableFormatter): def headings(self, headers): def row(self, rowdata): class HTMLTableFormatter(TableFormatter): def headings(self, headers): def row(self, rowdata): class ColumnFormatMixin: def row(self, rowdata): class UpperHeadersMixin: def headings(self, headers): def create_formatter(name, column_formats=None, upper_headers=False): if name == 'text': formatter_cls = TextTableFormatter elif name == 'csv': formatter_cls = CSVTableFormatter elif name == 'html': formatter_cls = HTMLTableFormatter else: raise RuntimeError('Unknown format %s' % name) if column_formats: class formatter_cls(ColumnFormatMixin, formatter_cls): formats = column_formats if upper_headers: class formatter_cls(UpperHeadersMixin, formatter_cls): pass return formatter_cls()
null
21,974
import csv from abc import ABC, abstractmethod class DictCSVParser(CSVParser): def __init__(self, types): def make_record(self, headers, row): def read_csv_as_dicts(filename, types): parser = DictCSVParser(types) return parser.parse(filename)
null
21,975
import csv from abc import ABC, abstractmethod class InstanceCSVParser(CSVParser): def __init__(self, cls): self.cls = cls def make_record(self, headers, row): return self.cls.from_row(row) def read_csv_as_instances(filename, cls): parser = InstanceCSVParser(cls) return parser.parse(filename)
null
21,978
def print_table(records, fields, formatter): formatter.headings(fields) for r in records: rowdata = [getattr(r, fieldname) for fieldname in fields] formatter.row(rowdata)
null
21,979
class TextTableFormatter(TableFormatter): def headings(self, headers): def row(self, rowdata): class CSVTableFormatter(TableFormatter): def headings(self, headers): def row(self, rowdata): class HTMLTableFormatter(TableFormatter): def headings(self, headers): def row(self, rowdata): def create_formatter(name): if name == 'text': formatter_cls = TextTableFormatter elif name == 'csv': formatter_cls = CSVTableFormatter elif name == 'html': formatter_cls = HTMLTableFormatter else: raise RuntimeError('Unknown format %s' % name) return formatter_cls()
null
21,980
import csv The provided code snippet includes necessary dependencies for implementing the `read_csv_as_dicts` function. Write a Python function `def read_csv_as_dicts(filename, types)` to solve the following problem: Read a CSV file into a list of dicts with column type conversion Here is the function: def read_csv_as_dicts(filename, types): ''' Read a CSV file into a list of dicts with column type conversion ''' records = [] with open(filename) as f: rows = csv.reader(f) headers = next(rows) for row in rows: record = { name: func(val) for name, func, val in zip(headers, types, row) } records.append(record) return records
Read a CSV file into a list of dicts with column type conversion
21,981
import csv The provided code snippet includes necessary dependencies for implementing the `read_csv_as_instances` function. Write a Python function `def read_csv_as_instances(filename, cls)` to solve the following problem: Read a CSV file into a list of instances Here is the function: def read_csv_as_instances(filename, cls): ''' Read a CSV file into a list of instances ''' records = [] with open(filename) as f: rows = csv.reader(f) headers = next(rows) for row in rows: records.append(cls.from_row(row)) return records
Read a CSV file into a list of instances
21,982
import sys import random chars = '\|/' def draw(rows, columns): for r in range(rows): print(''.join(random.choice(chars) for _ in range(columns)))
null
21,984
class TextTableFormatter(TableFormatter): def headings(self, headers): print(' '.join('%10s' % h for h in headers)) print(('-'*10 + ' ')*len(headers)) def row(self, rowdata): print(' '.join('%10s' % d for d in rowdata)) class CSVTableFormatter(TableFormatter): def headings(self, headers): print(','.join(headers)) def row(self, rowdata): print(','.join(str(d) for d in rowdata)) class HTMLTableFormatter(TableFormatter): def headings(self, headers): print('<tr>', end=' ') for h in headers: print('<th>%s</th>' % h, end=' ') print('</tr>') def row(self, rowdata): print('<tr>', end=' ') for d in rowdata: print('<td>%s</td>' % d, end=' ') print('</tr>') def create_formatter(name): if name == 'text': formatter_cls = TextTableFormatter elif name == 'csv': formatter_cls = CSVTableFormatter elif name == 'html': formatter_cls = HTMLTableFormatter else: raise RuntimeError('Unknown format %s' % name) return formatter_cls()
null
21,987
def typedproperty(name, expected_type): private_name = '_' + name @property def value(self): return getattr(self, private_name) @value.setter def value(self, val): if not isinstance(val, expected_type): raise TypeError(f'Expected {expected_type}') setattr(self, private_name, val) return value
null
21,989
from abc import ABC, abstractmethod class TextTableFormatter(TableFormatter): def headings(self, headers): def row(self, rowdata): class CSVTableFormatter(TableFormatter): def headings(self, headers): def row(self, rowdata): class HTMLTableFormatter(TableFormatter): def headings(self, headers): def row(self, rowdata): def create_formatter(name): if name == 'text': formatter = TextTableFormatter elif name == 'csv': formatter = CSVTableFormatter elif name == 'html': formatter = HTMLTableFormatter else: raise RuntimeError('Unknown format %s' % name) return formatter()
null
21,990
import csv from abc import ABC, abstractmethod class DictCSVParser(CSVParser): def __init__(self, types): self.types = types def make_record(self, headers, row): return { name: func(val) for name, func, val in zip(headers, self.types, row) } def read_csv_as_dicts(filename, types): parser = DictCSVParser(types) return parser.parse(filename)
null
21,992
def print_table(records, fields): # Print the table headers in a 10-character wide field for fieldname in fields: print('%10s' % fieldname, end=' ') print() # Print the separator bars print(('-'*10 + ' ')*len(fields)) # Output the table contents for r in records: for fieldname in fields: print('%10s' % getattr(r, fieldname), end=' ') print()
null
21,993
class Stock: types = (str, int, float) def __init__(self, name, shares, price): self.name = name self.shares = shares self.price = price def from_row(cls, row): values = [func(val) for func, val in zip(cls.types, row)] return cls(*values) def cost(self): return self.shares * self.price def sell(self, nshares): self.shares -= nshares The provided code snippet includes necessary dependencies for implementing the `read_portfolio` function. Write a Python function `def read_portfolio(filename)` to solve the following problem: Read a CSV file of stock data into a list of Stocks Here is the function: def read_portfolio(filename): ''' Read a CSV file of stock data into a list of Stocks ''' import csv portfolio = [] with open(filename) as f: rows = csv.reader(f) headers = next(rows) for row in rows: record = Stock.from_row(row) portfolio.append(record) return portfolio
Read a CSV file of stock data into a list of Stocks
21,996
def logged(func): print('Adding logging to', func.__name__) def wrapper(*args,**kwargs): print('Calling', func.__name__) return func(*args,**kwargs) return wrapper
null
21,997
from logcall import logged def add(x,y): return x+y
null
21,998
from logcall import logged def sub(x,y): return x-y
null
21,999
from inspect import signature def isvalidator(item): return isinstance(item, type) and issubclass(item, Validator) def validated(func): sig = signature(func) # Gather the function annotations annotations = { name:val for name, val in func.__annotations__.items() if isvalidator(val) } # Get the return annotation (if any) retcheck = annotations.pop('return', None) def wrapper(*args, **kwargs): bound = sig.bind(*args, **kwargs) errors = [] # Enforce argument checks for name, validator in annotations.items(): try: validator.check(bound.arguments[name]) except Exception as e: errors.append(f' {name}: {e}') if errors: raise TypeError('Bad Arguments\n' + '\n'.join(errors)) result = func(*args, **kwargs) # Enforce return check (if any) if retcheck: try: retcheck.check(result) except Exception as e: raise TypeError(f'Bad return: {e}') from None return result return wrapper
null
22,001
class Integer(Typed): expected_type = int from inspect import signature def div(x:Integer, y:Integer) -> Integer: return x / y
null
22,002
from functools import wraps logged = logformat('Calling {func.__name__}') def logformat(fmt): def logged(func): print('Adding logging to', func.__name__) @wraps(func) def wrapper(*args,**kwargs): print(fmt.format(func=func)) return func(*args, **kwargs) return wrapper return logged
null
22,003
from logcall import logged, logformat def add(x,y): return x+y
null
22,004
from logcall import logged, logformat def sub(x,y): return x-y
null
22,005
from logcall import logged, logformat def mul(x,y): return x*y
null
22,008
class Integer(Typed): from inspect import signature from functools import wraps def add(x:Integer, y:Integer) -> Integer: return x + y
null
22,009
class Integer(Typed): expected_type = int from inspect import signature from functools import wraps def div(x:Integer, y:Integer) -> Integer: return x / y
null
22,014
import csv def read_portfolio(filename): portfolio = [] with open(filename) as f: rows = csv.reader(f) headers = next(rows) for row in rows: record = { 'name' : row[0], 'shares' : int(row[1]), 'price' : float(row[2]) } portfolio.append(record) return portfolio
null
22,015
class Stock: def __init__(self, name, shares, price): self.name = name self.shares = shares self.price = price def cost(self): return self.shares * self.price def sell(self, nshares): self.shares -= nshares The provided code snippet includes necessary dependencies for implementing the `read_portfolio` function. Write a Python function `def read_portfolio(filename)` to solve the following problem: Read a CSV file of stock data into a list of Stocks Here is the function: def read_portfolio(filename): ''' Read a CSV file of stock data into a list of Stocks ''' import csv portfolio = [] with open(filename) as f: rows = csv.reader(f) headers = next(rows) for row in rows: record = Stock(row[0], int(row[1]), float(row[2])) portfolio.append(record) return portfolio
Read a CSV file of stock data into a list of Stocks
22,016
The provided code snippet includes necessary dependencies for implementing the `print_portfolio` function. Write a Python function `def print_portfolio(portfolio)` to solve the following problem: Make a nicely formatted table showing stock data Here is the function: def print_portfolio(portfolio): ''' Make a nicely formatted table showing stock data ''' print('%10s %10s %10s' % ('name', 'shares', 'price')) print(('-'*10 + ' ')*3) for s in portfolio: print('%10s %10d %10.2f' % (s.name, s.shares, s.price))
Make a nicely formatted table showing stock data
22,023
import os import time import csv def receive(expected_type): msg = yield assert isinstance(msg, expected_type), 'Expected type %s' % (expected_type) return msg from functools import wraps def printer(): while True: item = yield from receive(object) print(item)
null
22,024
from socket import * from select import select from collections import deque from types import coroutine tasks = deque() recv_wait = {} send_wait = {} def run(): while any([tasks, recv_wait, send_wait]): while not tasks: can_recv, can_send, _ = select(recv_wait, send_wait, []) for s in can_recv: tasks.append(recv_wait.pop(s)) for s in can_send: tasks.append(send_wait.pop(s)) task = tasks.popleft() try: reason, resource = task.send(None) if reason == 'recv': recv_wait[resource] = task elif reason == 'send': send_wait[resource] = task else: raise RuntimeError('Unknown reason %r' % reason) except StopIteration: print('Task done')
null
22,025
from socket import * from select import select from collections import deque from types import coroutine tasks = deque() class GenSocket: def __init__(self, sock): def accept(self): def recv(self, maxsize): def send(self, data): def __getattr__(self, name): async def tcp_server(address, handler): sock = GenSocket(socket(AF_INET, SOCK_STREAM)) sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) sock.bind(address) sock.listen(5) while True: client, addr = await sock.accept() tasks.append(handler(client, addr))
null
22,026
from socket import * from select import select from collections import deque from types import coroutine async def echo_handler(client, address): print('Connection from', address) while True: data = await client.recv(1000) if not data: break await client.send(b'GOT:' + data) print('Connection closed')
null
22,027
from inspect import signature from functools import wraps def isvalidator(item): def validated(func): sig = signature(func) # Gather the function annotations annotations = { name:val for name, val in func.__annotations__.items() if isvalidator(val) } # Get the return annotation (if any) retcheck = annotations.pop('return', None) @wraps(func) def wrapper(*args, **kwargs): bound = sig.bind(*args, **kwargs) errors = [] # Enforce argument checks for name, validator in annotations.items(): try: validator.check(bound.arguments[name]) except Exception as e: errors.append(f' {name}: {e}') if errors: raise TypeError('Bad Arguments\n' + '\n'.join(errors)) result = func(*args, **kwargs) # Enforce return check (if any) if retcheck: try: retcheck.check(result) except Exception as e: raise TypeError(f'Bad return: {e}') from None return result return wrapper
null
22,032
from socket import * from select import select from collections import deque tasks = deque() recv_wait = {} send_wait = {} def run(): while any([tasks, recv_wait, send_wait]): while not tasks: can_recv, can_send, _ = select(recv_wait, send_wait, []) for s in can_recv: tasks.append(recv_wait.pop(s)) for s in can_send: tasks.append(send_wait.pop(s)) task = tasks.popleft() try: reason, resource = task.send(None) if reason == 'recv': recv_wait[resource] = task elif reason == 'send': send_wait[resource] = task else: raise RuntimeError('Unknown reason %r' % reason) except StopIteration: print('Task done')
null
22,033
from socket import *
from select import select
from collections import deque

tasks = deque()


class GenSocket:
    """Socket wrapper whose blocking operations are generators.

    Each operation first yields a ('recv'|'send', socket) pair so a
    scheduler can park the task until the underlying socket is ready,
    then performs the real call and returns its result.
    """

    def __init__(self, sock):
        self.sock = sock

    def accept(self):
        # Wait until the listening socket is readable, then accept.
        yield 'recv', self.sock
        conn, peer = self.sock.accept()
        return GenSocket(conn), peer

    def recv(self, maxsize):
        yield 'recv', self.sock
        return self.sock.recv(maxsize)

    def send(self, data):
        yield 'send', self.sock
        return self.sock.send(data)

    def __getattr__(self, name):
        # Everything else delegates to the wrapped socket.
        return getattr(self.sock, name)


def tcp_server(address, handler):
    """Task: accept connections forever, spawning handler(client, addr)."""
    listener = GenSocket(socket(AF_INET, SOCK_STREAM))
    listener.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    listener.bind(address)
    listener.listen(5)
    while True:
        client, addr = yield from listener.accept()
        tasks.append(handler(client, addr))
null
22,034
from socket import *
from select import select
from collections import deque


def echo_handler(client, address):
    """Echo task (generator form): reply b'GOT:'+data until EOF.

    Delegates to the client's generator-based recv/send so a scheduler
    can interleave many connections.
    """
    print('Connection from', address)
    chunk = yield from client.recv(1000)
    while chunk:
        yield from client.send(b'GOT:' + chunk)
        chunk = yield from client.recv(1000)
    print('Connection closed')
null
22,035
from structure import Structure
from validate import String, Integer, Float
from cofollow import consumer, follow, receive
from tableformat import create_formatter
import csv


def receive(expected_type):
    """Coroutine helper: accept one sent message, asserting its type."""
    msg = yield
    assert isinstance(msg, expected_type), 'Expected type %s' % (expected_type)
    return msg


def to_csv(target):
    """Coroutine: accept raw text lines and send parsed CSV rows to target."""
    def feed():
        # Closure over `line`: hands csv.reader the most recent line each pull.
        while True:
            yield line
    rows = csv.reader(feed())
    while True:
        line = yield from receive(str)
        target.send(next(rows))
null
22,036
from structure import Structure
from validate import String, Integer, Float


class Ticker(Structure):
    """One row of stock-ticker data; order matches the CSV columns."""
    name = String()
    price = Float()
    date = String()
    time = String()
    change = Float()
    open = Float()
    high = Float()
    low = Float()
    volume = Integer()


from cofollow import consumer, follow, receive
from tableformat import create_formatter
import csv


def receive(expected_type):
    """Coroutine helper: accept one sent message, asserting its type."""
    msg = yield
    assert isinstance(msg, expected_type), 'Expected type %s' % (expected_type)
    return msg


def create_ticker(target):
    """Coroutine: convert each received list row into a Ticker record."""
    while True:
        fields = yield from receive(list)
        target.send(Ticker.from_row(fields))
null
22,037
from structure import Structure from validate import String, Integer, Float class Ticker(Structure): from cofollow import consumer, follow, receive from tableformat import create_formatter import csv def receive(expected_type): def negchange(target): while True: record = yield from receive(Ticker) if record.change < 0: target.send(record)
null
22,038
from structure import Structure
from validate import String, Integer, Float


class Ticker(Structure):
    """One row of stock-ticker data; order matches the CSV columns."""
    name = String()
    price = Float()
    date = String()
    time = String()
    change = Float()
    open = Float()
    high = Float()
    low = Float()
    volume = Integer()


from cofollow import consumer, follow, receive
from tableformat import create_formatter
import csv


def receive(expected_type):
    """Coroutine helper: accept one sent message, asserting its type."""
    msg = yield
    assert isinstance(msg, expected_type), 'Expected type %s' % (expected_type)
    return msg


def create_formatter(name, column_formats=None, upper_headers=False):
    """Build a table formatter by name ('text'|'csv'|'html').

    Optional mixins are layered on via throwaway subclasses: per-column
    %-format strings and upper-cased headers.
    """
    if name == 'text':
        formatter_cls = TextTableFormatter
    elif name == 'csv':
        formatter_cls = CSVTableFormatter
    elif name == 'html':
        formatter_cls = HTMLTableFormatter
    else:
        raise RuntimeError('Unknown format %s' % name)
    if column_formats:
        class formatter_cls(ColumnFormatMixin, formatter_cls):
            formats = column_formats
    if upper_headers:
        class formatter_cls(UpperHeadersMixin, formatter_cls):
            pass
    return formatter_cls()


def ticker(fmt, fields):
    """Coroutine: render received Ticker records as table rows.

    BUG FIX: the original ignored its `fmt` argument and always built a
    'text' formatter; it now honors `fmt`, consistent with the sibling
    definition of ticker() elsewhere in this file.
    """
    formatter = create_formatter(fmt)
    formatter.headings(fields)
    while True:
        rec = yield from receive(Ticker)
        row = [getattr(rec, name) for name in fields]
        formatter.row(row)
null
22,039
from abc import ABC, abstractmethod import csv import logging class DictCSVParser(CSVParser): def __init__(self, types): def make_record(self, headers, row): def read_csv_as_dicts(filename, types): parser = DictCSVParser(types) return parser.parse(filename)
null
22,040
from abc import ABC, abstractmethod
import csv
import logging


class InstanceCSVParser(CSVParser):
    """CSV parser that materializes each data row as an instance of `cls`.

    NOTE(review): CSVParser is defined elsewhere in the project; it is
    assumed to drive parse() and call make_record() per data row.
    """

    def __init__(self, cls):
        self.cls = cls

    def make_record(self, headers, row):
        # Headers are ignored: cls.from_row consumes positional values.
        return self.cls.from_row(row)


def read_csv_as_instances(filename, cls):
    """Read `filename`, returning cls instances (one per CSV data row)."""
    return InstanceCSVParser(cls).parse(filename)
null
22,041
def portfolio_cost(filename):
    """Total cost (shares * price) summed over lines of a portfolio file.

    Each line is whitespace-separated: name, shares, price.  Lines whose
    shares/price fields fail to convert are reported and skipped.
    """
    total_cost = 0.0
    with open(filename) as f:
        for line in f:
            fields = line.split()
            try:
                nshares = int(fields[1])
                price = float(fields[2])
            # This catches errors in int() and float() conversions above
            except ValueError as e:
                print("Couldn't parse:", repr(line))
                print("Reason:", e)
            else:
                total_cost = total_cost + nshares * price
    return total_cost


if __name__ == '__main__':
    # BUG FIX: the original ran this at import time *before* the function
    # was defined (NameError) and unconditionally did file I/O on import.
    # Run it only when executed as a script.
    print(portfolio_cost('../../Data/portfolio3.dat'))
null
22,046
import os
import time
from functools import wraps


def follow(filename, target):
    """Tail `filename` forever, sending each newly appended line to target.

    Seeks to EOF first so only lines written after the call are seen;
    polls every 0.1s when no new data is available.  Never returns.
    """
    with open(filename, 'r') as f:
        f.seek(0, os.SEEK_END)
        while True:
            line = f.readline()
            if line == '':
                time.sleep(0.1)   # nothing new yet; poll again shortly
            else:
                target.send(line)
null
22,047
import os
import time
from functools import wraps


def consumer(func):
    """Decorator: advance a coroutine to its first yield upon creation."""
    @wraps(func)
    def primed(*args, **kwargs):
        coro = func(*args, **kwargs)
        coro.send(None)   # prime: run up to the first yield
        return coro
    return primed
null
22,048
import os
import time
from functools import wraps


def printer():
    """Coroutine sink: print every item sent to it."""
    while True:
        print((yield))
null
22,054
from structure import Structure
from cofollow import consumer, follow
from tableformat import create_formatter
import csv


def to_csv(target):
    """Coroutine: accept raw text lines, send parsed CSV rows to target."""
    def feed():
        # Generator closure: hands csv.reader the most recent line received.
        while True:
            yield line
    rows = csv.reader(feed())
    while True:
        line = yield
        target.send(next(rows))
null
22,055
from structure import Structure


class Ticker(Structure):
    """One row of stock-ticker data; order matches the CSV columns."""
    name = String()
    price = Float()
    date = String()
    time = String()
    change = Float()
    open = Float()
    high = Float()
    low = Float()
    volume = Integer()


from cofollow import consumer, follow
from tableformat import create_formatter
import csv


def create_ticker(target):
    """Coroutine: convert each received row list into a Ticker record."""
    while True:
        fields = yield
        target.send(Ticker.from_row(fields))
null
22,056
from structure import Structure
from cofollow import consumer, follow
from tableformat import create_formatter
import csv


def negchange(target):
    """Coroutine filter: pass along only records whose change is negative."""
    while True:
        rec = yield
        if rec.change < 0:
            target.send(rec)
null
22,057
from structure import Structure
from cofollow import consumer, follow
from tableformat import create_formatter
import csv


def create_formatter(name, column_formats=None, upper_headers=False):
    """Build a table formatter by name ('text'|'csv'|'html').

    Optional mixins are layered on via throwaway subclasses: per-column
    %-format strings and upper-cased headers.
    """
    if name == 'text':
        formatter_cls = TextTableFormatter
    elif name == 'csv':
        formatter_cls = CSVTableFormatter
    elif name == 'html':
        formatter_cls = HTMLTableFormatter
    else:
        raise RuntimeError('Unknown format %s' % name)
    if column_formats:
        class formatter_cls(ColumnFormatMixin, formatter_cls):
            formats = column_formats
    if upper_headers:
        class formatter_cls(UpperHeadersMixin, formatter_cls):
            pass
    return formatter_cls()


def ticker(fmt, fields):
    """Coroutine: render each received record as a table row in format fmt."""
    formatter = create_formatter(fmt)
    formatter.headings(fields)
    while True:
        rec = yield
        formatter.row([getattr(rec, name) for name in fields])
null
22,067
import os
import time
import csv


def follow(filename):
    '''
    Generator that produces a sequence of lines being written
    at the end of a file.
    '''
    with open(filename, 'r') as f:
        f.seek(0, os.SEEK_END)
        while True:
            line = f.readline()
            if line:
                yield line
            else:
                time.sleep(0.1)   # Sleep briefly to avoid busy wait
Generator that produces a sequence of lines being written at the end of a file.
22,069
from collections import deque

tasks = deque()


def run():
    """Round-robin: resume each task once per turn until all finish."""
    while tasks:
        current = tasks.popleft()
        try:
            next(current)
        except StopIteration:
            print('Task done')
        else:
            tasks.append(current)   # still alive: back of the line
null
22,070
from collections import deque


def countdown(n):
    """Yield n times, printing a launch countdown from n down to 1."""
    for k in range(n, 0, -1):
        print('T-minus', k)
        yield
null
22,071
from collections import deque


def countup(n):
    """Yield n times, printing 'Up we go' with counts 0..n-1."""
    for k in range(n):
        print('Up we go', k)
        yield
null
22,073
from socket import *
from select import select
from collections import deque

tasks = deque()


def tcp_server(address, handler):
    """Task: accept connections forever, spawning handler tasks.

    Yields ('recv', sock) before each accept so a scheduler can park
    this task until the listening socket is readable.
    """
    sock = socket(AF_INET, SOCK_STREAM)
    sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    sock.bind(address)
    sock.listen(5)
    while True:
        yield 'recv', sock
        conn, addr = sock.accept()
        tasks.append(handler(conn, addr))
null
22,074
from socket import *
from select import select
from collections import deque


def echo_handler(client, address):
    """Echo task: yield readiness requests, then echo b'GOT:'+data until EOF."""
    print('Connection from', address)
    yield 'recv', client
    data = client.recv(1000)
    while data:
        yield 'send', client
        client.send(b'GOT:' + data)
        yield 'recv', client
        data = client.recv(1000)
    print('Connection closed')
null
22,075
from validate import Validator, validated


class Validator:
    """Base validator: check() is a no-op pass-through."""
    def check(cls, value):
        return value


def validated(func):
    """Decorator enforcing Validator annotations on arguments and return."""
    sig = signature(func)

    # Gather the function annotations that are validators
    annotations = {name: val for name, val in func.__annotations__.items()
                   if isvalidator(val)}

    # Get the return annotation (if any)
    retcheck = annotations.pop('return', None)

    def wrapper(*args, **kwargs):
        bound = sig.bind(*args, **kwargs)
        errors = []
        # Enforce argument checks
        for name, validator in annotations.items():
            try:
                validator.check(bound.arguments[name])
            except Exception as e:
                errors.append(f'    {name}: {e}')
        if errors:
            raise TypeError('Bad Arguments\n' + '\n'.join(errors))
        result = func(*args, **kwargs)
        # Enforce return check (if any)
        if retcheck:
            try:
                retcheck.check(result)
            except Exception as e:
                raise TypeError(f'Bad return: {e}') from None
        return result
    return wrapper


def validate_attributes(cls):
    '''
    Class decorator that scans a class definition for Validators
    and builds a _fields variable that captures their definition order.
    '''
    validators = []
    for name, val in vars(cls).items():
        if isinstance(val, Validator):
            validators.append(val)
        # Apply validated decorator to any callable with annotations
        elif callable(val) and val.__annotations__:
            setattr(cls, name, validated(val))

    # Collect all of the field names, in definition order
    cls._fields = tuple(v.name for v in validators)

    # Type conversions; the identity lambda is used when a validator
    # declares no expected_type.
    cls._types = tuple(getattr(v, 'expected_type', lambda x: x)
                       for v in validators)

    # Create the __init__ method
    if cls._fields:
        cls.create_init()
    return cls
Class decorator that scans a class definition for Validators and builds a _fields variable that captures their definition order.
22,078
class Integer(Typed):
    """Validator for int-typed values."""
    expected_type = int


from inspect import signature
from functools import wraps


def add(x: Integer, y: Integer) -> Integer:
    """Add two values; annotations mark them as validator-checked Integers."""
    return x + y
null
22,083
def print_table(records, fields):
    """Print records as a fixed-width (10-char column) table with a rule."""
    print(' '.join('%10s' % name for name in fields))
    print(('-' * 10 + ' ') * len(fields))
    for rec in records:
        print(' '.join('%10s' % getattr(rec, name) for name in fields))
null
22,085
import collections
import csv


class DataCollection(collections.abc.Sequence):
    """Column-oriented record store with row-dict access by index."""

    def __init__(self, columns):
        # Parallel lists: column names and their per-column value lists.
        self.column_names = list(columns)
        self.column_data = list(columns.values())

    def __len__(self):
        # All columns are the same length; use the first as representative.
        return len(self.column_data[0])

    def __getitem__(self, index):
        row = (col[index] for col in self.column_data)
        return dict(zip(self.column_names, row))


def read_csv_as_columns(filename, types):
    """Read a CSV into a DataCollection, converting each column via types."""
    columns = collections.defaultdict(list)
    with open(filename) as f:
        rows = csv.reader(f)
        headers = next(rows)
        for row in rows:
            for name, convert, raw in zip(headers, types, row):
                columns[name].append(convert(raw))
    return DataCollection(columns)
null
22,087
x = 42
print("Loaded simplemod")


def foo():
    """Report the current module-level value of x."""
    print("x is %s" % x)
null
22,090
from abc import ABC, abstractmethod


class TableFormatter(ABC):
    """Base table formatter; subclasses self-register by module name."""
    _formats = {}

    def __init_subclass__(cls):
        # Register under the defining module's last path component,
        # e.g. tableformat.formats.csv -> 'csv'.
        key = cls.__module__.split('.')[-1]
        TableFormatter._formats[key] = cls

    def headings(self, headers):
        pass

    def row(self, rowdata):
        pass


def print_table(records, fields, formatter):
    """Emit headings plus one row per record through the given formatter."""
    if not isinstance(formatter, TableFormatter):
        raise RuntimeError('Expected a TableFormatter')
    formatter.headings(fields)
    for rec in records:
        formatter.row([getattr(rec, name) for name in fields])
null
22,091
from abc import ABC, abstractmethod


class TableFormatter(ABC):
    """Base table formatter; subclasses self-register keyed by module name."""
    _formats = {}

    def __init_subclass__(cls):
        key = cls.__module__.split('.')[-1]
        TableFormatter._formats[key] = cls

    def headings(self, headers):
        pass

    def row(self, rowdata):
        pass


class ColumnFormatMixin:
    """Mixin: %-format each cell with the class's `formats` strings."""
    formats = []

    def row(self, rowdata):
        formatted = [fmt % cell for fmt, cell in zip(self.formats, rowdata)]
        super().row(formatted)


class UpperHeadersMixin:
    """Mixin: upper-case all headings."""

    def headings(self, headers):
        super().headings([h.upper() for h in headers])


def create_formatter(name, column_formats=None, upper_headers=False):
    """Look up (lazily importing) a registered formatter and instantiate it.

    Importing {package}.formats.{name} triggers __init_subclass__
    registration; optional mixins are layered on via throwaway subclasses.
    """
    if name not in TableFormatter._formats:
        __import__(f'{__package__}.formats.{name}')
    formatter_cls = TableFormatter._formats.get(name)
    if not formatter_cls:
        raise RuntimeError('Unknown format %s' % name)
    if column_formats:
        class formatter_cls(ColumnFormatMixin, formatter_cls):
            formats = column_formats
    if upper_headers:
        class formatter_cls(UpperHeadersMixin, formatter_cls):
            pass
    return formatter_cls()
null
22,099
from validate import Validator, validated


class Structure:
    """Base record type driven by class-level validator declarations.

    _fields/_types are populated by the validate_attributes class
    decorator, applied to every subclass via __init_subclass__.
    """
    _fields = ()
    _types = ()

    def __setattr__(self, name, value):
        # Only private names and declared fields may be assigned.
        if name.startswith('_') or name in self._fields:
            super().__setattr__(name, value)
        else:
            raise AttributeError('No attribute %s' % name)

    def __repr__(self):
        values = ', '.join(repr(getattr(self, name)) for name in self._fields)
        return '%s(%s)' % (type(self).__name__, values)

    def from_row(cls, row):
        # NOTE(review): reads like an alternate constructor — presumably
        # meant to be used as a classmethod; confirm against the metaclass.
        rowdata = [convert(raw) for convert, raw in zip(cls._types, row)]
        return cls(*rowdata)

    def create_init(cls):
        '''
        Create an __init__ method from _fields
        '''
        args = ','.join(cls._fields)
        code = f'def __init__(self, {args}):\n'
        for name in cls._fields:
            code += f'    self.{name} = {name}\n'
        locs = {}
        exec(code, locs)
        cls.__init__ = locs['__init__']

    def __init_subclass__(cls):
        # Apply the validated decorator to subclasses
        validate_attributes(cls)


def typed_structure(clsname, **validators):
    """Dynamically build a Structure subclass from keyword validators."""
    cls = type(clsname, (Structure,), validators)
    return cls
null
22,105
import math
import time
import threading


def minutes(tm):
    """Convert a clock string like '1:30pm' to minutes past midnight."""
    suffix = tm[-2:]
    parts = tm[:-2].split(":")
    hour = int(parts[0])
    minute = int(parts[1])
    if hour == 12:
        hour = 0          # 12:xx is the start of its half-day
    if suffix == 'pm':
        hour += 12
    return hour * 60 + minute


def minutes_to_str(m):
    """Render (possibly fractional) minutes as 'HH:MM.SS' text."""
    frac, whole = math.modf(m)
    hours = whole // 60
    mins = whole % 60
    secs = frac * 60
    return "%02d:%02d.%02.f" % (hours, mins, secs)
null
22,106
import math
import time
import threading


def minutes(tm):
    """Convert a clock string like '1:30pm' to minutes past midnight."""
    suffix = tm[-2:]
    parts = tm[:-2].split(":")
    hour = int(parts[0])
    minute = int(parts[1])
    if hour == 12:
        hour = 0
    if suffix == 'pm':
        hour += 12
    return hour * 60 + minute


def read_history(filename):
    """Parse comma-separated history lines; field 3 becomes minutes.

    SECURITY NOTE(review): each field is eval()'d, so this must only be
    run on trusted files -- a hostile line executes arbitrary code.
    """
    result = []
    for line in open(filename):
        raw_fields = line.strip().split(",")
        fields = [eval(cell) for cell in raw_fields]
        fields[3] = minutes(fields[3])
        result.append(fields)
    return result
null
22,107
import math
import time
import threading


def csv_record(fields):
    """Format a 9-field record as one quoted CSV line (no trailing newline)."""
    template = '"%s",%0.2f,"%s","%s",%0.2f,%0.2f,%0.2f,%0.2f,%d'
    return template % tuple(fields)
null
22,109
import torch
import torch.nn as nn
import torch.nn.functional as F

from .base_model import BaseModel
from .blocks import (
    FeatureFusionBlock,
    FeatureFusionBlock_custom,
    Interpolate,
    _make_encoder,
    forward_vit,
)


def _make_fusion_block(features, use_bn):
    """Standard fusion-block factory: ReLU activation, no deconv/expand."""
    return FeatureFusionBlock_custom(
        features,
        nn.ReLU(False),
        deconv=False,
        bn=use_bn,
        expand=False,
        align_corners=True,
    )
null
22,110
import torch import torch.nn as nn import timm import types import math import torch.nn.functional as F activations = {} def forward_flex(self, x): def forward_vit(pretrained, x): b, c, h, w = x.shape glob = pretrained.model.forward_flex(x) layer_1 = pretrained.activations["1"] layer_2 = pretrained.activations["2"] layer_3 = pretrained.activations["3"] layer_4 = pretrained.activations["4"] layer_1 = pretrained.act_postprocess1[0:2](layer_1) layer_2 = pretrained.act_postprocess2[0:2](layer_2) layer_3 = pretrained.act_postprocess3[0:2](layer_3) layer_4 = pretrained.act_postprocess4[0:2](layer_4) unflatten = nn.Sequential( nn.Unflatten( 2, torch.Size( [ h // pretrained.model.patch_size[1], w // pretrained.model.patch_size[0], ] ), ) ) if layer_1.ndim == 3: layer_1 = unflatten(layer_1) if layer_2.ndim == 3: layer_2 = unflatten(layer_2) if layer_3.ndim == 3: layer_3 = unflatten(layer_3) if layer_4.ndim == 3: layer_4 = unflatten(layer_4) layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1) layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2) layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3) layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4) return layer_1, layer_2, layer_3, layer_4
null
22,111
import torch
import torch.nn as nn
import timm
import types
import math
import torch.nn.functional as F


def _make_pretrained_deitb16_384(
    pretrained, use_readout="ignore", hooks=None, enable_attention_hooks=False
):
    """Build a DeiT-Base/16-384 ViT backbone wrapped for DPT feature taps."""
    model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained)
    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model,
        features=[96, 192, 384, 768],
        hooks=hooks,
        use_readout=use_readout,
        enable_attention_hooks=enable_attention_hooks,
    )
null
22,112
import torch import torch.nn as nn import timm import types import math import torch.nn.functional as F def _make_vit_b16_backbone( model, features=[96, 192, 384, 768], size=[384, 384], hooks=[2, 5, 8, 11], vit_features=768, use_readout="ignore", start_index=1, enable_attention_hooks=False, ): pretrained = nn.Module() pretrained.model = model pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1")) pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2")) pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3")) pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4")) pretrained.activations = activations if enable_attention_hooks: pretrained.model.blocks[hooks[0]].attn.register_forward_hook( get_attention("attn_1") ) pretrained.model.blocks[hooks[1]].attn.register_forward_hook( get_attention("attn_2") ) pretrained.model.blocks[hooks[2]].attn.register_forward_hook( get_attention("attn_3") ) pretrained.model.blocks[hooks[3]].attn.register_forward_hook( get_attention("attn_4") ) pretrained.attention = attention readout_oper = get_readout_oper(vit_features, features, use_readout, start_index) # 32, 48, 136, 384 pretrained.act_postprocess1 = nn.Sequential( readout_oper[0], Transpose(1, 2), nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), nn.Conv2d( in_channels=vit_features, out_channels=features[0], kernel_size=1, stride=1, padding=0, ), nn.ConvTranspose2d( in_channels=features[0], out_channels=features[0], kernel_size=4, stride=4, padding=0, bias=True, dilation=1, groups=1, ), ) pretrained.act_postprocess2 = nn.Sequential( readout_oper[1], Transpose(1, 2), nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), nn.Conv2d( in_channels=vit_features, out_channels=features[1], kernel_size=1, stride=1, padding=0, ), nn.ConvTranspose2d( in_channels=features[1], out_channels=features[1], kernel_size=2, stride=2, padding=0, bias=True, dilation=1, groups=1, ), ) 
pretrained.act_postprocess3 = nn.Sequential( readout_oper[2], Transpose(1, 2), nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), nn.Conv2d( in_channels=vit_features, out_channels=features[2], kernel_size=1, stride=1, padding=0, ), ) pretrained.act_postprocess4 = nn.Sequential( readout_oper[3], Transpose(1, 2), nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), nn.Conv2d( in_channels=vit_features, out_channels=features[3], kernel_size=1, stride=1, padding=0, ), nn.Conv2d( in_channels=features[3], out_channels=features[3], kernel_size=3, stride=2, padding=1, ), ) pretrained.model.start_index = start_index pretrained.model.patch_size = [16, 16] # We inject this function into the VisionTransformer instances so that # we can use it with interpolated position embeddings without modifying the library source. pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model) pretrained.model._resize_pos_embed = types.MethodType( _resize_pos_embed, pretrained.model ) return pretrained def _make_pretrained_deitb16_distil_384( pretrained, use_readout="ignore", hooks=None, enable_attention_hooks=False ): model = timm.create_model( "vit_deit_base_distilled_patch16_384", pretrained=pretrained ) hooks = [2, 5, 8, 11] if hooks == None else hooks return _make_vit_b16_backbone( model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout, start_index=2, enable_attention_hooks=enable_attention_hooks, )
null
22,113
import torch import torch.nn as nn from .vit import ( _make_pretrained_vitb_rn50_384, _make_pretrained_vitl16_384, _make_pretrained_vitb16_384, forward_vit, ) def _make_scratch(in_shape, out_shape, groups=1, expand=False): def _make_pretrained_resnext101_wsl(use_pretrained): def _make_pretrained_vitb_rn50_384( pretrained, use_readout="ignore", hooks=None, use_vit_only=False, enable_attention_hooks=False, ): def _make_pretrained_vitl16_384( pretrained, use_readout="ignore", hooks=None, enable_attention_hooks=False ): def _make_pretrained_vitb16_384( pretrained, use_readout="ignore", hooks=None, enable_attention_hooks=False ): def _make_encoder( backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore", enable_attention_hooks=False, ): if backbone == "vitl16_384": pretrained = _make_pretrained_vitl16_384( use_pretrained, hooks=hooks, use_readout=use_readout, enable_attention_hooks=enable_attention_hooks, ) scratch = _make_scratch( [256, 512, 1024, 1024], features, groups=groups, expand=expand ) # ViT-L/16 - 85.0% Top1 (backbone) elif backbone == "vitb_rn50_384": pretrained = _make_pretrained_vitb_rn50_384( use_pretrained, hooks=hooks, use_vit_only=use_vit_only, use_readout=use_readout, enable_attention_hooks=enable_attention_hooks, ) scratch = _make_scratch( [256, 512, 768, 768], features, groups=groups, expand=expand ) # ViT-H/16 - 85.0% Top1 (backbone) elif backbone == "vitb16_384": pretrained = _make_pretrained_vitb16_384( use_pretrained, hooks=hooks, use_readout=use_readout, enable_attention_hooks=enable_attention_hooks, ) scratch = _make_scratch( [96, 192, 384, 768], features, groups=groups, expand=expand ) # ViT-B/16 - 84.6% Top1 (backbone) elif backbone == "resnext101_wsl": pretrained = _make_pretrained_resnext101_wsl(use_pretrained) scratch = _make_scratch( [256, 512, 1024, 2048], features, groups=groups, expand=expand ) # efficientnet_lite3 else: print(f"Backbone '{backbone}' not 
implemented") assert False return pretrained, scratch
null
22,114
import os import glob import torch import cv2 import argparse import util.io from torchvision.transforms import Compose from dpt.models import DPTDepthModel from dpt.midas_net import MidasNet_large from dpt.transforms import Resize, NormalizeImage, PrepareForNet class DPTDepthModel(DPT): def __init__( self, path=None, non_negative=True, scale=1.0, shift=0.0, invert=False, **kwargs ): features = kwargs["features"] if "features" in kwargs else 256 self.scale = scale self.shift = shift self.invert = invert head = nn.Sequential( nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1), Interpolate(scale_factor=2, mode="bilinear", align_corners=True), nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1), nn.ReLU(True), nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), nn.ReLU(True) if non_negative else nn.Identity(), nn.Identity(), ) super().__init__(head, **kwargs) if path is not None: self.load(path) def forward(self, x): inv_depth = super().forward(x).squeeze(dim=1) if self.invert: depth = self.scale * inv_depth + self.shift depth[depth < 1e-8] = 1e-8 depth = 1.0 / depth return depth else: return inv_depth class MidasNet_large(BaseModel): """Network for monocular depth estimation.""" def __init__(self, path=None, features=256, non_negative=True): """Init. Args: path (str, optional): Path to saved model. Defaults to None. features (int, optional): Number of features. Defaults to 256. backbone (str, optional): Backbone network for encoder. 
Defaults to resnet50 """ print("Loading weights: ", path) super(MidasNet_large, self).__init__() use_pretrained = False if path is None else True self.pretrained, self.scratch = _make_encoder( backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained ) self.scratch.refinenet4 = FeatureFusionBlock(features) self.scratch.refinenet3 = FeatureFusionBlock(features) self.scratch.refinenet2 = FeatureFusionBlock(features) self.scratch.refinenet1 = FeatureFusionBlock(features) self.scratch.output_conv = nn.Sequential( nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1), Interpolate(scale_factor=2, mode="bilinear"), nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1), nn.ReLU(True), nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), nn.ReLU(True) if non_negative else nn.Identity(), ) if path: self.load(path) def forward(self, x): """Forward pass. Args: x (tensor): input data (image) Returns: tensor: depth """ layer_1 = self.pretrained.layer1(x) layer_2 = self.pretrained.layer2(layer_1) layer_3 = self.pretrained.layer3(layer_2) layer_4 = self.pretrained.layer4(layer_3) layer_1_rn = self.scratch.layer1_rn(layer_1) layer_2_rn = self.scratch.layer2_rn(layer_2) layer_3_rn = self.scratch.layer3_rn(layer_3) layer_4_rn = self.scratch.layer4_rn(layer_4) path_4 = self.scratch.refinenet4(layer_4_rn) path_3 = self.scratch.refinenet3(path_4, layer_3_rn) path_2 = self.scratch.refinenet2(path_3, layer_2_rn) path_1 = self.scratch.refinenet1(path_2, layer_1_rn) out = self.scratch.output_conv(path_1) return torch.squeeze(out, dim=1) class Resize(object): """Resize sample to given size (width, height).""" def __init__( self, width, height, resize_target=True, keep_aspect_ratio=False, ensure_multiple_of=1, resize_method="lower_bound", image_interpolation_method=cv2.INTER_AREA, ): """Init. Args: width (int): desired output width height (int): desired output height resize_target (bool, optional): True: Resize the full sample (image, mask, target). 
False: Resize image only. Defaults to True. keep_aspect_ratio (bool, optional): True: Keep the aspect ratio of the input sample. Output sample might not have the given width and height, and resize behaviour depends on the parameter 'resize_method'. Defaults to False. ensure_multiple_of (int, optional): Output width and height is constrained to be multiple of this parameter. Defaults to 1. resize_method (str, optional): "lower_bound": Output will be at least as large as the given size. "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.) "minimal": Scale as least as possible. (Output size might be smaller than given size.) Defaults to "lower_bound". """ self.__width = width self.__height = height self.__resize_target = resize_target self.__keep_aspect_ratio = keep_aspect_ratio self.__multiple_of = ensure_multiple_of self.__resize_method = resize_method self.__image_interpolation_method = image_interpolation_method def constrain_to_multiple_of(self, x, min_val=0, max_val=None): y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int) if max_val is not None and y > max_val: y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int) if y < min_val: y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int) return y def get_size(self, width, height): # determine new height and width scale_height = self.__height / height scale_width = self.__width / width if self.__keep_aspect_ratio: if self.__resize_method == "lower_bound": # scale such that output size is lower bound if scale_width > scale_height: # fit width scale_height = scale_width else: # fit height scale_width = scale_height elif self.__resize_method == "upper_bound": # scale such that output size is upper bound if scale_width < scale_height: # fit width scale_height = scale_width else: # fit height scale_width = scale_height elif self.__resize_method == "minimal": # scale as least as possbile if abs(1 - 
scale_width) < abs(1 - scale_height): # fit width scale_height = scale_width else: # fit height scale_width = scale_height else: raise ValueError( f"resize_method {self.__resize_method} not implemented" ) if self.__resize_method == "lower_bound": new_height = self.constrain_to_multiple_of( scale_height * height, min_val=self.__height ) new_width = self.constrain_to_multiple_of( scale_width * width, min_val=self.__width ) elif self.__resize_method == "upper_bound": new_height = self.constrain_to_multiple_of( scale_height * height, max_val=self.__height ) new_width = self.constrain_to_multiple_of( scale_width * width, max_val=self.__width ) elif self.__resize_method == "minimal": new_height = self.constrain_to_multiple_of(scale_height * height) new_width = self.constrain_to_multiple_of(scale_width * width) else: raise ValueError(f"resize_method {self.__resize_method} not implemented") return (new_width, new_height) def __call__(self, sample): width, height = self.get_size( sample["image"].shape[1], sample["image"].shape[0] ) # resize sample sample["image"] = cv2.resize( sample["image"], (width, height), interpolation=self.__image_interpolation_method, ) if self.__resize_target: if "disparity" in sample: sample["disparity"] = cv2.resize( sample["disparity"], (width, height), interpolation=cv2.INTER_NEAREST, ) if "depth" in sample: sample["depth"] = cv2.resize( sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST ) sample["mask"] = cv2.resize( sample["mask"].astype(np.float32), (width, height), interpolation=cv2.INTER_NEAREST, ) sample["mask"] = sample["mask"].astype(bool) return sample class NormalizeImage(object): """Normlize image by given mean and std.""" def __init__(self, mean, std): self.__mean = mean self.__std = std def __call__(self, sample): sample["image"] = (sample["image"] - self.__mean) / self.__std return sample class PrepareForNet(object): """Prepare sample for usage as network input.""" def __init__(self): pass def __call__(self, sample): 
image = np.transpose(sample["image"], (2, 0, 1)) sample["image"] = np.ascontiguousarray(image).astype(np.float32) if "mask" in sample: sample["mask"] = sample["mask"].astype(np.float32) sample["mask"] = np.ascontiguousarray(sample["mask"]) if "disparity" in sample: disparity = sample["disparity"].astype(np.float32) sample["disparity"] = np.ascontiguousarray(disparity) if "depth" in sample: depth = sample["depth"].astype(np.float32) sample["depth"] = np.ascontiguousarray(depth) return sample The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run(input_path, output_path, model_path, model_type="dpt_hybrid", optimize=True)` to solve the following problem: Run MonoDepthNN to compute depth maps. Args: input_path (str): path to input folder output_path (str): path to output folder model_path (str): path to saved model Here is the function: def run(input_path, output_path, model_path, model_type="dpt_hybrid", optimize=True): """Run MonoDepthNN to compute depth maps. 
Args: input_path (str): path to input folder output_path (str): path to output folder model_path (str): path to saved model """ print("initialize") # select device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print("device: %s" % device) # load network if model_type == "dpt_large": # DPT-Large net_w = net_h = 384 model = DPTDepthModel( path=model_path, backbone="vitl16_384", non_negative=True, enable_attention_hooks=False, ) normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == "dpt_hybrid": # DPT-Hybrid net_w = net_h = 384 model = DPTDepthModel( path=model_path, backbone="vitb_rn50_384", non_negative=True, enable_attention_hooks=False, ) normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == "dpt_hybrid_kitti": net_w = 1216 net_h = 352 model = DPTDepthModel( path=model_path, scale=0.00006016, shift=0.00579, invert=True, backbone="vitb_rn50_384", non_negative=True, enable_attention_hooks=False, ) normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == "dpt_hybrid_nyu": net_w = 640 net_h = 480 model = DPTDepthModel( path=model_path, scale=0.000305, shift=0.1378, invert=True, backbone="vitb_rn50_384", non_negative=True, enable_attention_hooks=False, ) normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == "midas_v21": # Convolutional model net_w = net_h = 384 model = MidasNet_large(model_path, non_negative=True) normalization = NormalizeImage( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] ) else: assert ( False ), f"model_type '{model_type}' not implemented, use: --model_type [dpt_large|dpt_hybrid|dpt_hybrid_kitti|dpt_hybrid_nyu|midas_v21]" transform = Compose( [ Resize( net_w, net_h, resize_target=None, keep_aspect_ratio=True, ensure_multiple_of=32, resize_method="minimal", image_interpolation_method=cv2.INTER_CUBIC, ), normalization, PrepareForNet(), ] ) model.eval() if optimize == 
True and device == torch.device("cuda"): model = model.to(memory_format=torch.channels_last) model = model.half() model.to(device) # get input img_names = glob.glob(os.path.join(input_path, "*")) num_images = len(img_names) # create output folder os.makedirs(output_path, exist_ok=True) print("start processing") for ind, img_name in enumerate(img_names): if os.path.isdir(img_name): continue print(" processing {} ({}/{})".format(img_name, ind + 1, num_images)) # input img = util.io.read_image(img_name) if args.kitti_crop is True: height, width, _ = img.shape top = height - 352 left = (width - 1216) // 2 img = img[top : top + 352, left : left + 1216, :] img_input = transform({"image": img})["image"] # compute with torch.no_grad(): sample = torch.from_numpy(img_input).to(device).unsqueeze(0) if optimize == True and device == torch.device("cuda"): sample = sample.to(memory_format=torch.channels_last) sample = sample.half() prediction = model.forward(sample) prediction = ( torch.nn.functional.interpolate( prediction.unsqueeze(1), size=img.shape[:2], mode="bicubic", align_corners=False, ) .squeeze() .cpu() .numpy() ) if model_type == "dpt_hybrid_kitti": prediction *= 256 if model_type == "dpt_hybrid_nyu": prediction *= 1000.0 filename = os.path.join( output_path, os.path.splitext(os.path.basename(img_name))[0] ) util.io.write_depth(filename, prediction, bits=2, absolute_depth=args.absolute_depth) print("finished")
Run MonoDepthNN to compute depth maps. Args: input_path (str): path to input folder output_path (str): path to output folder model_path (str): path to saved model
22,115
from PIL import Image def _get_voc_pallete(num_cls): n = num_cls pallete = [0]*(n*3) for j in range(0,n): lab = j pallete[j*3+0] = 0 pallete[j*3+1] = 0 pallete[j*3+2] = 0 i = 0 while (lab > 0): pallete[j*3+0] |= (((lab >> 0) & 1) << (7-i)) pallete[j*3+1] |= (((lab >> 1) & 1) << (7-i)) pallete[j*3+2] |= (((lab >> 2) & 1) << (7-i)) i = i + 1 lab >>= 3 return pallete
null
22,116
import torch

# NOTE(review): the original snippet imported get_mean_attention_map from
# dpt.vit and then immediately shadowed it with an identical local definition;
# the redundant import has been dropped. It also used torch without importing
# it, which raised NameError at call time — fixed below.


def get_mean_attention_map(attn, token, shape):
    """Average one query token's attention over all heads at image resolution.

    Args:
        attn (torch.Tensor): attention weights of shape
            (1, heads, tokens, tokens).
        token (int): index of the query token to visualize (e.g. 1 for the
            first patch token, -1 for the last).
        shape (tuple): network input shape (N, C, H, W); H and W must be
            multiples of the hard-coded 16-pixel patch size.

    Returns:
        torch.Tensor: (H, W) map of head-averaged attention.
    """
    # drop the first key token (presumably the readout/CLS token — confirm)
    attn = attn[:, :, token, 1:]
    # fold the flat patch sequence back into an (H/16, W/16) grid
    attn = attn.unflatten(2, torch.Size([shape[2] // 16, shape[3] // 16])).float()
    # upsample the per-head maps to the input resolution
    attn = torch.nn.functional.interpolate(
        attn, size=shape[2:], mode="bicubic", align_corners=False
    ).squeeze(0)
    # average across heads
    return torch.mean(attn, 0)


def _hide_ticks():
    """Remove tick marks and tick labels from the current axes."""
    import matplotlib.pyplot as plt

    gc = plt.gca()
    gc.axes.xaxis.set_ticklabels([])
    gc.axes.yaxis.set_ticklabels([])
    gc.axes.xaxis.set_ticks([])
    gc.axes.yaxis.set_ticks([])


def visualize_attention(input, model, prediction, model_type):
    """Show the input, the prediction and mean attention maps of four layers.

    Args:
        input (torch.Tensor): network input of shape (1, C, H, W), normalized
            to [-1, 1].
        model: DPT model exposing cached attention tensors in
            ``model.pretrained.attention["attn_1"]`` ... ``["attn_4"]``.
        prediction (array): depth prediction to display.
        model_type (str): "dpt_hybrid" labels the hooks as layers 3/6/9/12;
            any other value labels them 6/12/18/24.
    """
    # matplotlib is only needed for visualization; import lazily so the rest
    # of the module works without a plotting backend installed.
    import matplotlib.pyplot as plt

    # inputs were normalized to [-1, 1]; map back to [0, 1] for display
    input = (input + 1.0) / 2.0

    attn1 = model.pretrained.attention["attn_1"]
    attn2 = model.pretrained.attention["attn_2"]
    attn3 = model.pretrained.attention["attn_3"]
    attn4 = model.pretrained.attention["attn_4"]

    plt.subplot(3, 4, 1)
    plt.imshow(input.squeeze().permute(1, 2, 0))
    plt.title("Input", fontsize=8)
    plt.axis("off")

    plt.subplot(3, 4, 2)
    plt.imshow(prediction)
    plt.set_cmap("inferno")
    plt.title("Prediction", fontsize=8)
    plt.axis("off")

    # transformer layer indices shown in the titles, per backbone depth
    if model_type == "dpt_hybrid":
        h = [3, 6, 9, 12]
    else:
        h = [6, 12, 18, 24]

    # row 2: attention of the upper-left corner patch token
    plt.subplot(3, 4, 5)
    plt.imshow(get_mean_attention_map(attn1, 1, input.shape))
    plt.ylabel("Upper left corner", fontsize=8)
    plt.title(f"Layer {h[0]}", fontsize=8)
    _hide_ticks()

    plt.subplot(3, 4, 6)
    plt.imshow(get_mean_attention_map(attn2, 1, input.shape))
    plt.title(f"Layer {h[1]}", fontsize=8)
    plt.axis("off")

    plt.subplot(3, 4, 7)
    plt.imshow(get_mean_attention_map(attn3, 1, input.shape))
    plt.title(f"Layer {h[2]}", fontsize=8)
    plt.axis("off")

    plt.subplot(3, 4, 8)
    plt.imshow(get_mean_attention_map(attn4, 1, input.shape))
    plt.title(f"Layer {h[3]}", fontsize=8)
    plt.axis("off")

    # row 3: attention of the lower-right corner patch token
    plt.subplot(3, 4, 9)
    plt.imshow(get_mean_attention_map(attn1, -1, input.shape))
    plt.ylabel("Lower right corner", fontsize=8)
    _hide_ticks()

    plt.subplot(3, 4, 10)
    plt.imshow(get_mean_attention_map(attn2, -1, input.shape))
    plt.axis("off")

    plt.subplot(3, 4, 11)
    plt.imshow(get_mean_attention_map(attn3, -1, input.shape))
    plt.axis("off")

    plt.subplot(3, 4, 12)
    plt.imshow(get_mean_attention_map(attn4, -1, input.shape))
    plt.axis("off")

    plt.tight_layout()
    plt.show()
null
22,117
import re

import numpy as np


def read_pfm(path):
    """Read pfm file.

    Args:
        path (str): path to file

    Returns:
        tuple: (data, scale) — ``data`` is a float array of shape (H, W, 3)
        for color files or (H, W) for grayscale, flipped to top-down row
        order; ``scale`` is the absolute scale factor from the header.
    """
    with open(path, "rb") as pfm_file:
        # header line 1: magic number ("PF" = color, "Pf" = grayscale)
        magic = pfm_file.readline().rstrip().decode("ascii")
        if magic == "PF":
            is_color = True
        elif magic == "Pf":
            is_color = False
        else:
            raise Exception("Not a PFM file: " + path)

        # header line 2: "<width> <height>"
        dim_match = re.match(
            r"^(\d+)\s(\d+)\s$", pfm_file.readline().decode("ascii")
        )
        if not dim_match:
            raise Exception("Malformed PFM header.")
        width, height = (int(value) for value in dim_match.groups())

        # header line 3: scale; its sign encodes the payload's endianness
        scale = float(pfm_file.readline().decode("ascii").rstrip())
        endian = "<" if scale < 0 else ">"  # negative scale = little-endian
        scale = abs(scale)

        # payload rows are stored bottom-up, so flip vertically after reshape
        pixels = np.fromfile(pfm_file, endian + "f")
        shape = (height, width, 3) if is_color else (height, width)
        return np.flipud(pixels.reshape(shape)), scale
Read pfm file. Args: path (str): path to file Returns: tuple: (data, scale)
22,118
import cv2
import numpy as np
import torch


def resize_image(img):
    """Resize image and make it fit for network.

    The longer side is scaled to roughly 384 pixels and both sides are
    rounded up to the next multiple of 32, then the image is converted to a
    float tensor with a leading batch dimension.

    Args:
        img (array): image, H x W x C.

    Returns:
        tensor: data ready for network, shape (1, C, H', W').
    """
    orig_height = img.shape[0]
    orig_width = img.shape[1]

    # scale factor that maps the longer side to 384
    if orig_width > orig_height:
        scale = orig_width / 384
    else:
        scale = orig_height / 384

    # round each scaled side up to the next multiple of 32
    new_height = (np.ceil(orig_height / scale / 32) * 32).astype(int)
    new_width = (np.ceil(orig_width / scale / 32) * 32).astype(int)

    resized = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_AREA)

    # HWC -> CHW, float, and add the batch dimension
    chw = np.transpose(resized, (2, 0, 1))
    batch = torch.from_numpy(chw).contiguous().float().unsqueeze(0)
    return batch
Resize image and make it fit for network. Args: img (array): image Returns: tensor: data ready for network