hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73e2b455b2469350da91d9c9222fae224cc7c1c | 612 | py | Python | examples/my_top_tracks.py | Piteryo/spotipy | f548038aac6bd0eb4cf7ade2295c441a141880d7 | [
"MIT"
] | 1 | 2020-07-05T22:37:16.000Z | 2020-07-05T22:37:16.000Z | examples/my_top_tracks.py | Piteryo/spotipy | f548038aac6bd0eb4cf7ade2295c441a141880d7 | [
"MIT"
] | null | null | null | examples/my_top_tracks.py | Piteryo/spotipy | f548038aac6bd0eb4cf7ade2295c441a141880d7 | [
"MIT"
] | 1 | 2021-02-24T01:52:02.000Z | 2021-02-24T01:52:02.000Z | # Shows the top tracks for a user
import sys
import spotipy
from spotipy.oauth2 import SpotifyOAuth
if len(sys.argv) > 1:
username = sys.argv[1]
else:
print("Usage: %s username" % (sys.argv[0],))
sys.exit()
scope = 'user-top-read'
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
ranges = ['short_term', 'medium_term', 'long_term']
for sp_range in ranges:
print("range:", sp_range)
results = sp.current_user_top_tracks(time_range=sp_range, limit=50)
for i, item in enumerate(results['items']):
print(i, item['name'], '//', item['artists'][0]['name'])
print() | 25.5 | 71 | 0.671569 |
import sys
import spotipy
from spotipy.oauth2 import SpotifyOAuth
if len(sys.argv) > 1:
username = sys.argv[1]
else:
print("Usage: %s username" % (sys.argv[0],))
sys.exit()
scope = 'user-top-read'
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
ranges = ['short_term', 'medium_term', 'long_term']
for sp_range in ranges:
print("range:", sp_range)
results = sp.current_user_top_tracks(time_range=sp_range, limit=50)
for i, item in enumerate(results['items']):
print(i, item['name'], '//', item['artists'][0]['name'])
print() | true | true |
f73e2b8f54d3b3fc1bcc24ef1b0f9ce11a9ce9c5 | 2,360 | py | Python | city_scrapers/spiders/det_great_lakes_water_authority.py | noahkconley/city-scrapers | 37420ce3a9295c2aac68c0fb4a957ad41394a801 | [
"MIT"
] | null | null | null | city_scrapers/spiders/det_great_lakes_water_authority.py | noahkconley/city-scrapers | 37420ce3a9295c2aac68c0fb4a957ad41394a801 | [
"MIT"
] | null | null | null | city_scrapers/spiders/det_great_lakes_water_authority.py | noahkconley/city-scrapers | 37420ce3a9295c2aac68c0fb4a957ad41394a801 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import re
import scrapy
from ics import Calendar
from city_scrapers.spider import Spider
class DetGreatLakesWaterAuthoritySpider(Spider):
name = 'det_great_lakes_water_authority'
agency_id = 'Great Lakes Water Authority'
timezone = 'America/Detroit'
allowed_domains = ['www.glwater.org']
start_urls = ['http://www.glwater.org/events/']
def parse(self, response):
"""
`parse` should always `yield` a dict that follows the Event Schema
<https://city-bureau.github.io/city-scrapers/06_event_schema.html>.
Change the `_parse_id`, `_parse_name`, etc methods to fit your scraping
needs.
"""
next_page = response.css('.tribe-events-nav-next')[0].xpath('a/@href').extract_first()
if next_page:
yield scrapy.Request(next_page, callback=self.parse)
yield scrapy.Request(response.url + '?ical=1&tribe_display=month', callback=self._parse_ical)
def _parse_ical(self, ical_event):
cal = Calendar(ical_event.text)
for event in cal.events:
# Meetings parens to indicate status (e.g. (Canceled))
desc = re.search(r'(?P<name>[^()]+)(?P<status>\(([^()]+)\))?', event.name)
data = {
'_type': 'event',
'name': desc.group('name').strip(),
'event_description': event.description,
'classification': self._parse_classification(desc.group('name')),
'start': {'date': event.begin.date(), 'time': event.begin.time(), 'note': ''},
'end': {'date': event.end.date(), 'time': event.end.time(), 'note': ''},
'all_day': event.all_day,
'location': {'name': '', 'address': event.location, 'neighborhood': ''},
'documents': [],
'sources': [{'url': event.url, 'note': ''}]
}
data['id'] = self._generate_id(data)
data['status'] = self._generate_status(data, desc.group(0))
yield data
@staticmethod
def _parse_classification(name):
"""
Parse or generate classification (e.g. public health, education, etc).
"""
if 'BOARD' in name.upper():
return 'Board'
if 'COMMITTEE' in name.upper():
return 'Committee'
return ''
| 38.688525 | 101 | 0.572458 |
import re
import scrapy
from ics import Calendar
from city_scrapers.spider import Spider
class DetGreatLakesWaterAuthoritySpider(Spider):
name = 'det_great_lakes_water_authority'
agency_id = 'Great Lakes Water Authority'
timezone = 'America/Detroit'
allowed_domains = ['www.glwater.org']
start_urls = ['http://www.glwater.org/events/']
def parse(self, response):
next_page = response.css('.tribe-events-nav-next')[0].xpath('a/@href').extract_first()
if next_page:
yield scrapy.Request(next_page, callback=self.parse)
yield scrapy.Request(response.url + '?ical=1&tribe_display=month', callback=self._parse_ical)
def _parse_ical(self, ical_event):
cal = Calendar(ical_event.text)
for event in cal.events:
desc = re.search(r'(?P<name>[^()]+)(?P<status>\(([^()]+)\))?', event.name)
data = {
'_type': 'event',
'name': desc.group('name').strip(),
'event_description': event.description,
'classification': self._parse_classification(desc.group('name')),
'start': {'date': event.begin.date(), 'time': event.begin.time(), 'note': ''},
'end': {'date': event.end.date(), 'time': event.end.time(), 'note': ''},
'all_day': event.all_day,
'location': {'name': '', 'address': event.location, 'neighborhood': ''},
'documents': [],
'sources': [{'url': event.url, 'note': ''}]
}
data['id'] = self._generate_id(data)
data['status'] = self._generate_status(data, desc.group(0))
yield data
@staticmethod
def _parse_classification(name):
if 'BOARD' in name.upper():
return 'Board'
if 'COMMITTEE' in name.upper():
return 'Committee'
return ''
| true | true |
f73e2c425af73c8125b90455522e4d3fa756fb7e | 1,575 | py | Python | ch_13/src/log_catcher.py | real-slim-chadi/Python-Object-Oriented-Programming---4th-edition | 7c486866171786b620795fa33a79ec9ac9a8ba1b | [
"MIT"
] | 43 | 2021-06-03T18:39:09.000Z | 2022-03-29T20:32:13.000Z | ch_13/src/log_catcher.py | real-slim-chadi/Python-Object-Oriented-Programming---4th-edition | 7c486866171786b620795fa33a79ec9ac9a8ba1b | [
"MIT"
] | 9 | 2022-03-12T01:04:07.000Z | 2022-03-12T01:05:01.000Z | ch_13/src/log_catcher.py | real-slim-chadi/Python-Object-Oriented-Programming---4th-edition | 7c486866171786b620795fa33a79ec9ac9a8ba1b | [
"MIT"
] | 36 | 2021-06-19T07:14:09.000Z | 2022-03-12T22:17:09.000Z | """
Python 3 Object-Oriented Programming
Chapter 13. Testing Object-Oriented Programs.
"""
import json
from pathlib import Path
import socketserver
from typing import TextIO
import pickle
import struct
import sys
class LogDataCatcher(socketserver.BaseRequestHandler):
log_file: TextIO
count: int = 0
size_format = ">L"
size_bytes = struct.calcsize(size_format)
def handle(self) -> None:
size_header_bytes = self.request.recv(LogDataCatcher.size_bytes)
while size_header_bytes:
payload_size = struct.unpack(LogDataCatcher.size_format, size_header_bytes)
print(f"{size_header_bytes=} {payload_size=}", file=sys.stderr)
payload_bytes = self.request.recv(payload_size[0])
print(f"{len(payload_bytes)=}", file=sys.stderr)
payload = pickle.loads(payload_bytes)
LogDataCatcher.count += 1
print(f"{self.client_address[0]} {LogDataCatcher.count} {payload!r}")
self.log_file.write(json.dumps(payload) + "\n")
try:
size_header_bytes = self.request.recv(LogDataCatcher.size_bytes)
except (ConnectionResetError, BrokenPipeError):
break
def main(host: str, port: int, target: Path) -> None:
with target.open("w") as unified_log:
LogDataCatcher.log_file = unified_log
with socketserver.TCPServer((host, port), LogDataCatcher) as server:
server.serve_forever()
if __name__ == "__main__":
HOST, PORT = "localhost", 18842
main(HOST, PORT, Path("one.log"))
| 32.8125 | 87 | 0.667937 | import json
from pathlib import Path
import socketserver
from typing import TextIO
import pickle
import struct
import sys
class LogDataCatcher(socketserver.BaseRequestHandler):
log_file: TextIO
count: int = 0
size_format = ">L"
size_bytes = struct.calcsize(size_format)
def handle(self) -> None:
size_header_bytes = self.request.recv(LogDataCatcher.size_bytes)
while size_header_bytes:
payload_size = struct.unpack(LogDataCatcher.size_format, size_header_bytes)
print(f"{size_header_bytes=} {payload_size=}", file=sys.stderr)
payload_bytes = self.request.recv(payload_size[0])
print(f"{len(payload_bytes)=}", file=sys.stderr)
payload = pickle.loads(payload_bytes)
LogDataCatcher.count += 1
print(f"{self.client_address[0]} {LogDataCatcher.count} {payload!r}")
self.log_file.write(json.dumps(payload) + "\n")
try:
size_header_bytes = self.request.recv(LogDataCatcher.size_bytes)
except (ConnectionResetError, BrokenPipeError):
break
def main(host: str, port: int, target: Path) -> None:
with target.open("w") as unified_log:
LogDataCatcher.log_file = unified_log
with socketserver.TCPServer((host, port), LogDataCatcher) as server:
server.serve_forever()
if __name__ == "__main__":
HOST, PORT = "localhost", 18842
main(HOST, PORT, Path("one.log"))
| true | true |
f73e2c475e39c712227ebb0cbd85fe1f13ab6e2f | 28,082 | py | Python | satchmo/apps/product/south_migrations/0002_add_attributeoption.py | predatell/satchmo | 6ced1f845aadec240c7e433c3cbf4caca96e0d92 | [
"BSD-3-Clause"
] | 16 | 2015-03-06T14:42:27.000Z | 2019-12-23T21:37:01.000Z | satchmo/apps/product/south_migrations/0002_add_attributeoption.py | predatell/satchmo | 6ced1f845aadec240c7e433c3cbf4caca96e0d92 | [
"BSD-3-Clause"
] | null | null | null | satchmo/apps/product/south_migrations/0002_add_attributeoption.py | predatell/satchmo | 6ced1f845aadec240c7e433c3cbf4caca96e0d92 | [
"BSD-3-Clause"
] | 8 | 2015-01-28T16:02:37.000Z | 2022-03-03T21:29:40.000Z | # encoding: utf-8
import datetime
from south.db import db
from south.logger import get_logger
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'AttributeOption'
db.create_table('product_attributeoption', (
('name', self.gf('django.db.models.fields.SlugField')(max_length=100, db_index=True)),
('error_message', self.gf('django.db.models.fields.CharField')(default=u'Inavlid Entry', max_length=100)),
('sort_order', self.gf('django.db.models.fields.IntegerField')(default=1)),
('validation', self.gf('django.db.models.fields.CharField')(max_length=100)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal('product', ['AttributeOption'])
# TODO add default validation for AttributeOption
from product.models import VALIDATIONS as validations
default_validation = validations[0][0]
if not db.dry_run:
for attr in orm['product.productattribute'].objects.all():
if orm['product.attributeoption'].objects.filter(name__exact=attr.name).count() < 1:
orm['product.attributeoption'].objects.create(
description=attr.name, name=attr.name,
validation=default_validation,
)
if db.backend_name=='sqlite3':
get_logger().debug("dropping and re-creating table for ProductAttribute")
if db.dry_run:
return
#
# We re-create ProductAttribute, since sqlite does not support adding
# foreign key constraints on existing tables (ie. adding ForeignKey
# fields).
#
# We have to do 0003's work here, because we can't iterate over
# ProductAttribute instances there - the 'option' column has not
# been created and django barfs if we do so.
#
# Collect old data
old_attrs = {}
for attr in orm['product.ProductAttribute'].objects.all():
obj = {}
# We have already collected 'name' earlier, so we can leave it
# out.
# TODO make this more generic
for k in ('product', 'languagecode', 'value'):
obj[k] = getattr(attr, k)
old_attrs[attr.id] = obj
# Deleting old 'ProductAttribute' table
db.delete_table('product_productattribute')
# Re-use create_table expression for old 'ProductAttribute', this
# time with the adding the 'option' column
db.create_table('product_productattribute', (
('languagecode', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
('product', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['product.Product'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('value', self.gf('django.db.models.fields.CharField')(max_length=255)),
('name', self.gf('django.db.models.fields.SlugField')(max_length=100, db_index=True)),
('option', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['product.AttributeOption']))
))
db.send_create_signal('product', ['ProductAttribute'])
# Add back data
for id, attr_dict in old_attrs.items():
kwargs = {}
for field in ('product', 'languagecode', 'value'):
kwargs[field] = attr_dict[field]
orm['product.ProductAttribute'].objects.create(
id=id, **kwargs)
def backwards(self, orm):
# Deleting model 'AttributeOption'
db.delete_table('product_attributeoption')
if db.backend_name == 'sqlite3':
# Since we added the 'option' column in forwards(), delete it here.
db.delete_column('product_productattribute', 'option_id')
models = {
'product.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'error_message': ('django.db.models.fields.CharField', [], {'default': "u'Inavlid Entry'", 'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'db_index': 'True'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'validation': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'product.category': {
'Meta': {'unique_together': "(('site', 'slug'),)", 'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child'", 'blank': 'True', 'null': 'True', 'to': "orm['product.Category']"}),
'related_categories': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_categories'", 'blank': 'True', 'null': 'True', 'to': "orm['product.Category']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'})
},
'product.categoryimage': {
'Meta': {'unique_together': "(('category', 'sort'),)", 'object_name': 'CategoryImage'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'blank': 'True', 'null': 'True', 'to': "orm['product.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'picture': ('satchmo_utils.thumbnail.field.ImageWithThumbnailField', [], {'name_field': "'_filename'", 'max_length': '200'}),
'sort': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'product.categoryimagetranslation': {
'Meta': {'unique_together': "(('categoryimage', 'languagecode', 'version'),)", 'object_name': 'CategoryImageTranslation'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'caption': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'categoryimage': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['product.CategoryImage']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'languagecode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'product.categorytranslation': {
'Meta': {'unique_together': "(('category', 'languagecode', 'version'),)", 'object_name': 'CategoryTranslation'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['product.Category']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'languagecode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'product.configurableproduct': {
'Meta': {'object_name': 'ConfigurableProduct'},
'create_subs': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'option_group': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['product.OptionGroup']", 'blank': 'True'}),
'product': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['product.Product']", 'unique': 'True', 'primary_key': 'True'})
},
'product.customproduct': {
'Meta': {'object_name': 'CustomProduct'},
'deferred_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'downpayment': ('django.db.models.fields.IntegerField', [], {'default': '20'}),
'option_group': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['product.OptionGroup']", 'blank': 'True'}),
'product': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['product.Product']", 'unique': 'True', 'primary_key': 'True'})
},
'product.customtextfield': {
'Meta': {'object_name': 'CustomTextField'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'price_change': ('satchmo_utils.fields.CurrencyField', [], {'null': 'True', 'max_digits': '14', 'decimal_places': '6', 'blank': 'True'}),
'products': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'custom_text_fields'", 'to': "orm['product.CustomProduct']"}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'product.customtextfieldtranslation': {
'Meta': {'unique_together': "(('customtextfield', 'languagecode', 'version'),)", 'object_name': 'CustomTextFieldTranslation'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'customtextfield': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['product.CustomTextField']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'languagecode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'product.discount': {
'Meta': {'object_name': 'Discount'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allValid': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allowedUses': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'amount': ('satchmo_utils.fields.CurrencyField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'automatic': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'unique': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'endDate': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'minOrder': ('satchmo_utils.fields.CurrencyField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'numUses': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'percentage': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
'shipping': ('django.db.models.fields.CharField', [], {'default': "'NONE'", 'max_length': '10', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'startDate': ('django.db.models.fields.DateField', [], {}),
'validProducts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['product.Product']", 'null': 'True', 'blank': 'True'})
},
'product.downloadableproduct': {
'Meta': {'object_name': 'DownloadableProduct'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'expire_minutes': ('django.db.models.fields.IntegerField', [], {}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'num_allowed_downloads': ('django.db.models.fields.IntegerField', [], {}),
'product': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['product.Product']", 'unique': 'True', 'primary_key': 'True'})
},
'product.option': {
'Meta': {'unique_together': "(('option_group', 'value'),)", 'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['product.OptionGroup']"}),
'price_change': ('satchmo_utils.fields.CurrencyField', [], {'null': 'True', 'max_digits': '14', 'decimal_places': '6', 'blank': 'True'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'product.optiongroup': {
'Meta': {'object_name': 'OptionGroup'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'product.optiongrouptranslation': {
'Meta': {'unique_together': "(('optiongroup', 'languagecode', 'version'),)", 'object_name': 'OptionGroupTranslation'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'languagecode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'optiongroup': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['product.OptionGroup']"}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'product.optiontranslation': {
'Meta': {'unique_together': "(('option', 'languagecode', 'version'),)", 'object_name': 'OptionTranslation'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'languagecode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['product.Option']"}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'product.price': {
'Meta': {'unique_together': "(('product', 'quantity', 'expires'),)", 'object_name': 'Price'},
'expires': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'price': ('satchmo_utils.fields.CurrencyField', [], {'max_digits': '14', 'decimal_places': '6'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['product.Product']"}),
'quantity': ('django.db.models.fields.DecimalField', [], {'default': "'1.0'", 'max_digits': '18', 'decimal_places': '6'})
},
'product.product': {
'Meta': {'unique_together': "(('site', 'sku'), ('site', 'slug'))", 'object_name': 'Product'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'also_purchased': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'also_products'", 'blank': 'True', 'null': 'True', 'to': "orm['product.Product']"}),
'category': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['product.Category']", 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'height': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'height_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items_in_stock': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '18', 'decimal_places': '6'}),
'length': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'length_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'related_items': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_products'", 'blank': 'True', 'null': 'True', 'to': "orm['product.Product']"}),
'shipclass': ('django.db.models.fields.CharField', [], {'default': "'DEFAULT'", 'max_length': '10'}),
'short_description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'sku': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'taxClass': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['product.TaxClass']", 'null': 'True', 'blank': 'True'}),
'taxable': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'total_sold': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '18', 'decimal_places': '6'}),
'weight': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'weight_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'width_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'})
},
'product.productattribute': {
'Meta': {'object_name': 'ProductAttribute'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'languagecode': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['product.Product']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'product.productimage': {
'Meta': {'object_name': 'ProductImage'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'picture': ('satchmo_utils.thumbnail.field.ImageWithThumbnailField', [], {'name_field': "'_filename'", 'max_length': '200'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['product.Product']", 'null': 'True', 'blank': 'True'}),
'sort': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'product.productimagetranslation': {
'Meta': {'unique_together': "(('productimage', 'languagecode', 'version'),)", 'object_name': 'ProductImageTranslation'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'caption': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'languagecode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'productimage': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['product.ProductImage']"}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'product.productpricelookup': {
'Meta': {'object_name': 'ProductPriceLookup'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'discountable': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items_in_stock': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '6'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'parentid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '14', 'decimal_places': '6'}),
'productslug': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'quantity': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '6'}),
'siteid': ('django.db.models.fields.IntegerField', [], {})
},
'product.producttranslation': {
'Meta': {'unique_together': "(('product', 'languagecode', 'version'),)", 'object_name': 'ProductTranslation'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'languagecode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['product.Product']"}),
'short_description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'product.productvariation': {
'Meta': {'object_name': 'ProductVariation'},
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['product.Option']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['product.ConfigurableProduct']"}),
'product': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['product.Product']", 'unique': 'True', 'primary_key': 'True'})
},
'product.subscriptionproduct': {
'Meta': {'object_name': 'SubscriptionProduct'},
'expire_length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'expire_unit': ('django.db.models.fields.CharField', [], {'default': "'DAY'", 'max_length': '5'}),
'is_shippable': ('django.db.models.fields.IntegerField', [], {'max_length': '1'}),
'product': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['product.Product']", 'unique': 'True', 'primary_key': 'True'}),
'recurring': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'recurring_times': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'product.taxclass': {
'Meta': {'object_name': 'TaxClass'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'product.trial': {
'Meta': {'object_name': 'Trial'},
'expire_length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'price': ('satchmo_utils.fields.CurrencyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['product.SubscriptionProduct']"})
},
'sites.site': {
'Meta': {'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['product']
| 77.360882 | 198 | 0.564347 |
import datetime
from south.db import db
from south.logger import get_logger
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add the ``AttributeOption`` model and attach existing
    ``ProductAttribute`` rows to it via a new ``option`` foreign key
    (South schema migration for the product app)."""

    def forwards(self, orm):
        # Create the new AttributeOption table.
        db.create_table('product_attributeoption', (
            ('name', self.gf('django.db.models.fields.SlugField')(max_length=100, db_index=True)),
            ('error_message', self.gf('django.db.models.fields.CharField')(default=u'Inavlid Entry', max_length=100)),
            ('sort_order', self.gf('django.db.models.fields.IntegerField')(default=1)),
            ('validation', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('description', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ))
        db.send_create_signal('product', ['AttributeOption'])
        # Seed one AttributeOption per distinct existing attribute name so
        # that current ProductAttribute rows have an option to point at.
        from product.models import VALIDATIONS as validations
        default_validation = validations[0][0]
        if not db.dry_run:
            for attr in orm['product.productattribute'].objects.all():
                if orm['product.attributeoption'].objects.filter(name__exact=attr.name).count() < 1:
                    orm['product.attributeoption'].objects.create(
                        description=attr.name, name=attr.name,
                        validation=default_validation,
                    )
        # On sqlite the ProductAttribute table is dropped and rebuilt with
        # the new 'option' column (presumably because of sqlite's limited
        # ALTER TABLE support -- TODO confirm); existing rows are saved
        # and re-inserted below.
        if db.backend_name=='sqlite3':
            get_logger().debug("dropping and re-creating table for ProductAttribute")
            if db.dry_run:
                return
            # Snapshot the fields that must survive the drop/recreate.
            old_attrs = {}
            for attr in orm['product.ProductAttribute'].objects.all():
                obj = {}
                for k in ('product', 'languagecode', 'value'):
                    obj[k] = getattr(attr, k)
                old_attrs[attr.id] = obj
            db.delete_table('product_productattribute')
            db.create_table('product_productattribute', (
                ('languagecode', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
                ('product', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['product.Product'])),
                ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
                ('value', self.gf('django.db.models.fields.CharField')(max_length=255)),
                ('name', self.gf('django.db.models.fields.SlugField')(max_length=100, db_index=True)),
                ('option', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['product.AttributeOption']))
            ))
            db.send_create_signal('product', ['ProductAttribute'])
            # Restore the saved rows, keeping their original primary keys.
            for id, attr_dict in old_attrs.items():
                kwargs = {}
                for field in ('product', 'languagecode', 'value'):
                    kwargs[field] = attr_dict[field]
                orm['product.ProductAttribute'].objects.create(
                    id=id, **kwargs)

    def backwards(self, orm):
        # Undo forwards(): remove the AttributeOption table; on sqlite also
        # drop the option_id column added to product_productattribute.
        db.delete_table('product_attributeoption')
        if db.backend_name == 'sqlite3':
            db.delete_column('product_productattribute', 'option_id')

    # Frozen ORM model definitions used by South to construct the 'orm'
    # argument passed to forwards()/backwards(). Auto-generated -- do not
    # edit by hand.
    models = {
        'product.attributeoption': {
            'Meta': {'object_name': 'AttributeOption'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'error_message': ('django.db.models.fields.CharField', [], {'default': "u'Inavlid Entry'", 'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'db_index': 'True'}),
            'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'validation': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'product.category': {
            'Meta': {'unique_together': "(('site', 'slug'),)", 'object_name': 'Category'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'meta': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child'", 'blank': 'True', 'null': 'True', 'to': "orm['product.Category']"}),
            'related_categories': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_categories'", 'blank': 'True', 'null': 'True', 'to': "orm['product.Category']"}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'})
        },
        'product.categoryimage': {
            'Meta': {'unique_together': "(('category', 'sort'),)", 'object_name': 'CategoryImage'},
            'caption': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'blank': 'True', 'null': 'True', 'to': "orm['product.Category']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'picture': ('satchmo_utils.thumbnail.field.ImageWithThumbnailField', [], {'name_field': "'_filename'", 'max_length': '200'}),
            'sort': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'product.categoryimagetranslation': {
            'Meta': {'unique_together': "(('categoryimage', 'languagecode', 'version'),)", 'object_name': 'CategoryImageTranslation'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'caption': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'categoryimage': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['product.CategoryImage']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'languagecode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        'product.categorytranslation': {
            'Meta': {'unique_together': "(('category', 'languagecode', 'version'),)", 'object_name': 'CategoryTranslation'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['product.Category']"}),
            'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'languagecode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        'product.configurableproduct': {
            'Meta': {'object_name': 'ConfigurableProduct'},
            'create_subs': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'option_group': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['product.OptionGroup']", 'blank': 'True'}),
            'product': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['product.Product']", 'unique': 'True', 'primary_key': 'True'})
        },
        'product.customproduct': {
            'Meta': {'object_name': 'CustomProduct'},
            'deferred_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'downpayment': ('django.db.models.fields.IntegerField', [], {'default': '20'}),
            'option_group': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['product.OptionGroup']", 'blank': 'True'}),
            'product': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['product.Product']", 'unique': 'True', 'primary_key': 'True'})
        },
        'product.customtextfield': {
            'Meta': {'object_name': 'CustomTextField'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'price_change': ('satchmo_utils.fields.CurrencyField', [], {'null': 'True', 'max_digits': '14', 'decimal_places': '6', 'blank': 'True'}),
            'products': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'custom_text_fields'", 'to': "orm['product.CustomProduct']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
            'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'product.customtextfieldtranslation': {
            'Meta': {'unique_together': "(('customtextfield', 'languagecode', 'version'),)", 'object_name': 'CustomTextFieldTranslation'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'customtextfield': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['product.CustomTextField']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'languagecode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        'product.discount': {
            'Meta': {'object_name': 'Discount'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'allValid': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'allowedUses': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'amount': ('satchmo_utils.fields.CurrencyField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
            'automatic': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
            'code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'unique': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'endDate': ('django.db.models.fields.DateField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'minOrder': ('satchmo_utils.fields.CurrencyField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
            'numUses': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'percentage': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
            'shipping': ('django.db.models.fields.CharField', [], {'default': "'NONE'", 'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'startDate': ('django.db.models.fields.DateField', [], {}),
            'validProducts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['product.Product']", 'null': 'True', 'blank': 'True'})
        },
        'product.downloadableproduct': {
            'Meta': {'object_name': 'DownloadableProduct'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'expire_minutes': ('django.db.models.fields.IntegerField', [], {}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'num_allowed_downloads': ('django.db.models.fields.IntegerField', [], {}),
            'product': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['product.Product']", 'unique': 'True', 'primary_key': 'True'})
        },
        'product.option': {
            'Meta': {'unique_together': "(('option_group', 'value'),)", 'object_name': 'Option'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['product.OptionGroup']"}),
            'price_change': ('satchmo_utils.fields.CurrencyField', [], {'null': 'True', 'max_digits': '14', 'decimal_places': '6', 'blank': 'True'}),
            'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'product.optiongroup': {
            'Meta': {'object_name': 'OptionGroup'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'product.optiongrouptranslation': {
            'Meta': {'unique_together': "(('optiongroup', 'languagecode', 'version'),)", 'object_name': 'OptionGroupTranslation'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'languagecode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'optiongroup': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['product.OptionGroup']"}),
            'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        'product.optiontranslation': {
            'Meta': {'unique_together': "(('option', 'languagecode', 'version'),)", 'object_name': 'OptionTranslation'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'languagecode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['product.Option']"}),
            'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        'product.price': {
            'Meta': {'unique_together': "(('product', 'quantity', 'expires'),)", 'object_name': 'Price'},
            'expires': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'price': ('satchmo_utils.fields.CurrencyField', [], {'max_digits': '14', 'decimal_places': '6'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['product.Product']"}),
            'quantity': ('django.db.models.fields.DecimalField', [], {'default': "'1.0'", 'max_digits': '18', 'decimal_places': '6'})
        },
        'product.product': {
            'Meta': {'unique_together': "(('site', 'sku'), ('site', 'slug'))", 'object_name': 'Product'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'also_purchased': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'also_products'", 'blank': 'True', 'null': 'True', 'to': "orm['product.Product']"}),
            'category': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['product.Category']", 'blank': 'True'}),
            'date_added': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'height': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
            'height_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'items_in_stock': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '18', 'decimal_places': '6'}),
            'length': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
            'length_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
            'meta': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'related_items': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_products'", 'blank': 'True', 'null': 'True', 'to': "orm['product.Product']"}),
            'shipclass': ('django.db.models.fields.CharField', [], {'default': "'DEFAULT'", 'max_length': '10'}),
            'short_description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'sku': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'taxClass': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['product.TaxClass']", 'null': 'True', 'blank': 'True'}),
            'taxable': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'total_sold': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '18', 'decimal_places': '6'}),
            'weight': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
            'weight_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
            'width': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
            'width_units': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'})
        },
        'product.productattribute': {
            'Meta': {'object_name': 'ProductAttribute'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'languagecode': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'db_index': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['product.Product']"}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'product.productimage': {
            'Meta': {'object_name': 'ProductImage'},
            'caption': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'picture': ('satchmo_utils.thumbnail.field.ImageWithThumbnailField', [], {'name_field': "'_filename'", 'max_length': '200'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['product.Product']", 'null': 'True', 'blank': 'True'}),
            'sort': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'product.productimagetranslation': {
            'Meta': {'unique_together': "(('productimage', 'languagecode', 'version'),)", 'object_name': 'ProductImageTranslation'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'caption': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'languagecode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'productimage': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['product.ProductImage']"}),
            'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        'product.productpricelookup': {
            'Meta': {'object_name': 'ProductPriceLookup'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'discountable': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'items_in_stock': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '6'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'parentid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '14', 'decimal_places': '6'}),
            'productslug': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'quantity': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '6'}),
            'siteid': ('django.db.models.fields.IntegerField', [], {})
        },
        'product.producttranslation': {
            'Meta': {'unique_together': "(('product', 'languagecode', 'version'),)", 'object_name': 'ProductTranslation'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'languagecode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['product.Product']"}),
            'short_description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        'product.productvariation': {
            'Meta': {'object_name': 'ProductVariation'},
            'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['product.Option']"}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['product.ConfigurableProduct']"}),
            'product': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['product.Product']", 'unique': 'True', 'primary_key': 'True'})
        },
        'product.subscriptionproduct': {
            'Meta': {'object_name': 'SubscriptionProduct'},
            'expire_length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'expire_unit': ('django.db.models.fields.CharField', [], {'default': "'DAY'", 'max_length': '5'}),
            'is_shippable': ('django.db.models.fields.IntegerField', [], {'max_length': '1'}),
            'product': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['product.Product']", 'unique': 'True', 'primary_key': 'True'}),
            'recurring': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'recurring_times': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'product.taxclass': {
            'Meta': {'object_name': 'TaxClass'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        'product.trial': {
            'Meta': {'object_name': 'Trial'},
            'expire_length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'price': ('satchmo_utils.fields.CurrencyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2'}),
            'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['product.SubscriptionProduct']"})
        },
        'sites.site': {
            'Meta': {'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['product']
| true | true |
f73e2c6aabd54cd5cd3654b6ad74b726d748e4d6 | 10,272 | py | Python | bin/dm_link.py | slimnsour/datman | 6ac4827e2ae20401eb4b048d42bdfca5db5d3de9 | [
"Apache-2.0"
] | null | null | null | bin/dm_link.py | slimnsour/datman | 6ac4827e2ae20401eb4b048d42bdfca5db5d3de9 | [
"Apache-2.0"
] | null | null | null | bin/dm_link.py | slimnsour/datman | 6ac4827e2ae20401eb4b048d42bdfca5db5d3de9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Renames (links) exam zip archives by consulting a lookup table.
This program looks up the proper name in a table that lists the original exam
archive name, and the target name.
Usage:
dm_link.py [options] <study>
dm_link.py [options] <study> <zipfile>
Arguments:
<study> Name of the study to process
<zipfile> Single Zipfile to process
Options:
--lookup FILE Path to scan id lookup table,
overrides metadata/scans.csv
--scanid-field STR Dicom field to match target_name with
[default: PatientName]
-v --verbose Verbose logging
-d --debug Debug logging
-q --quiet Less debuggering
--dry-run Dry run
DETAILS
This program is used to rename an exam archive with their properly
formatted scan names (see datman.scanid). Two approaches are used to find
this name:
### Scan ID in a lookup table (--lookup)
The lookup table should have at least two columns: source_name, and
target_name. For example:
source_name target_name
2014_0126_FB001 ASDD_CMH_FB001_01_01
The source_name column is matched against the archive filename (so the
entry above applies to 2014_0126_FB001.zip). The target_name column
specifies the proper name for the exam.
If the archive is not found in the lookup table, the dicom header is
consulted:
### Scan ID in the dicom header (--scanid-field)
Some scans may have the scan ID embedded in a dicom header field.
The --scanid-field specifies a dicom header field to check for a
well-formatted exam name.
ADDITIONAL MATCH CONDITIONS
Additional columns in the lookup table can be specified to ensure that the
DICOM headers of the file match what is expected. These column names should
start with dicom_. For example,
source_name target_name dicom_StudyID
2014_0126_FB001 ASDD_CMH_FB001_01_01 512
In the example above, this script would check that the StudyID field of an
arbitrary dicom file in the archive contains the value "512". If not, an
error is thrown.
IGNORING EXAM ARCHIVES
Exam archives can be ignored by placing an entry into the lookup table with
the target_name of '<ignore>', for example:
source_name target_name dicom_StudyID
2014_0126_FB001 <ignore>
"""
import glob
import logging
import os
import sys
from docopt import docopt
import pandas as pd
import datman.config
import datman.scanid
import datman.utils
# Module-level logger named after this script.
logger = logging.getLogger(os.path.basename(__file__))

# Shared state, rebound by main() and read by the helpers below:
# already_linked maps the realpath of each archive to its existing symlink
# in the dicom folder; lookup holds the scans.csv lookup dataframe; DRYRUN
# suppresses symlink creation when set.
already_linked = {}
lookup = None
DRYRUN = None
def main():
    """Parse command line arguments, configure logging and the study
    config, then link every requested exam archive into the dicom folder.
    """
    # These module-level names are also read by link_archive() and
    # get_scanid_from_lookup_table(), so rebind them globally here.
    global already_linked
    global lookup
    global DRYRUN

    arguments = docopt(__doc__)
    verbose = arguments["--verbose"]
    debug = arguments["--debug"]
    DRYRUN = arguments["--dry-run"]
    quiet = arguments["--quiet"]
    study = arguments["<study>"]
    lookup_path = arguments["--lookup"]
    scanid_field = arguments["--scanid-field"]
    zipfile = arguments["<zipfile>"]

    # Set up logging; the noisiest requested level wins (quiet < verbose
    # < debug), matching the original flag precedence.
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.WARN)
    logger.setLevel(logging.WARN)
    if quiet:
        logger.setLevel(logging.ERROR)
        ch.setLevel(logging.ERROR)
    if verbose:
        logger.setLevel(logging.INFO)
        ch.setLevel(logging.INFO)
    if debug:
        logger.setLevel(logging.DEBUG)
        ch.setLevel(logging.DEBUG)

    formatter = logging.Formatter("%(asctime)s - %(name)s - {study} - "
                                  "%(levelname)s - %(message)s".format(
                                      study=study))
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    # Set up the study configuration object.
    cfg = datman.config.config(study=study)

    if not lookup_path:
        lookup_path = os.path.join(cfg.get_path("meta"), "scans.csv")

    dicom_path = cfg.get_path("dicom")
    zips_path = cfg.get_path("zips")

    if not os.path.isdir(dicom_path):
        logger.warning("Dicom folder {} doesnt exist, creating it.".format(
            dicom_path))
        try:
            os.makedirs(dicom_path)
        except OSError:
            logger.error("Failed to create dicom path {}".format(dicom_path))
            return

    if not os.path.isdir(zips_path):
        logger.error("Zips path {} doesnt exist".format(zips_path))
        return

    try:
        # Raw string for the regex separator: fixes the W605 invalid
        # escape sequence the old "\s+" literal produced.
        lookup = pd.read_csv(lookup_path, sep=r"\s+", dtype=str)
    except IOError:
        logger.error("Lookup file {} not found".format(lookup_path))
        return

    # Identify which zip files have already been linked.
    already_linked = {os.path.realpath(f): f
                      for f
                      in glob.glob(os.path.join(dicom_path, "*"))
                      if os.path.islink(f)}

    if zipfile:
        if isinstance(zipfile, str):
            zipfile = [zipfile]
        # 'zip_name' rather than 'zip' so the builtin isn't shadowed.
        archives = [os.path.join(zips_path, zip_name) for zip_name in zipfile]
    else:
        archives = [os.path.join(zips_path, archive)
                    for archive
                    in os.listdir(zips_path)
                    if os.path.splitext(archive)[1] == ".zip"]

    logger.info("Found {} archives".format(len(archives)))

    for archive in archives:
        link_archive(archive, dicom_path, scanid_field, cfg)
def link_archive(archive_path, dicom_path, scanid_field, config):
    """Create a correctly named symlink in dicom_path for one exam archive.

    The scan id is taken from the lookup table when present (with any
    dicom_* match conditions checked, as the module docstring documents),
    otherwise from the dicom header field named by scanid_field. Archives
    mapped to '<ignore>' or already linked are skipped.
    """
    if not os.path.isfile(archive_path):
        logger.error("Archive {} not found".format(archive_path))
        return

    # Skip archives that already have a link in the dicom folder.
    linked_path = already_linked.get(os.path.realpath(archive_path), "")
    if linked_path:
        logger.info("{} already linked at {}".format(archive_path,
                                                     linked_path))
        return

    # get_scanid_from_lookup_table returns a (scanid, lookupinfo) tuple on
    # a match, otherwise None.
    scanid = None
    lookupinfo = None
    lookup_result = get_scanid_from_lookup_table(archive_path)
    if lookup_result:
        scanid, lookupinfo = lookup_result

    if scanid == "<ignore>":
        logger.info("Ignoring {}".format(archive_path))
        return

    if not scanid:
        scanid = get_scanid_from_header(archive_path, scanid_field)
    elif not validate_headers(archive_path, lookupinfo, scanid_field):
        # Bug fix: the dicom_* match conditions in the lookup table were
        # previously fetched but never checked. Failure details are
        # already logged by validate_headers.
        return

    if not scanid:
        logger.error("Scanid not found for archive: {}".format(archive_path))
        return

    try:
        ident = datman.utils.validate_subject_id(scanid, config)
    except datman.scanid.ParseException as e:
        logger.error("Can't make link for {}. Reason: {}".format(
            archive_path, e))
        return

    scanid = str(ident)

    # Do the linking; a relative symlink keeps the link valid if the whole
    # project tree moves.
    target = os.path.join(dicom_path, scanid)
    target = target + datman.utils.get_extension(archive_path)

    if os.path.exists(target):
        logger.error("Target: {} already exists for archive: {}"
                     .format(target, archive_path))
        return

    relpath = os.path.relpath(archive_path, dicom_path)
    logger.info("Linking {} to {}".format(relpath, target))
    if not DRYRUN:
        os.symlink(relpath, target)
def get_scanid_from_lookup_table(archive_path):
    """Get the scan id for an archive from the lookup table.

    Returns a (scanid, lookupinfo) tuple, where lookupinfo is the matching
    dataframe row(s) (e.g. expected dicom header matches). Returns None
    when the archive's source name is not in the table.
    """
    # NOTE: no 'global' statement needed -- 'lookup' is only read here.
    basename = os.path.basename(os.path.normpath(archive_path))
    # Strip the archive extension to recover the source_name column value.
    source_name = basename[:-len(datman.utils.get_extension(basename))]
    lookupinfo = lookup[lookup["source_name"] == source_name]

    if lookupinfo.empty:
        logger.debug("{} not found in source_name column."
                     .format(source_name))
        # Explicit None (the old implicit bare 'return' contradicted the
        # docstring).
        return None

    scanid = lookupinfo["target_name"].tolist()[0]
    return (scanid, lookupinfo)
def get_archive_headers(archive_path):
    """Return the dicom headers of one series from the archive.

    Reads only the first series (stop_after_first=True). Yields a falsy
    value when the archive contains no readable dicoms.
    """
    hdr = None
    try:
        hdr = datman.utils.get_archive_headers(archive_path,
                                               stop_after_first=True)
        hdr = list(hdr.values())[0]
    except Exception:
        logger.warning("Archive: {} contains no DICOMs".format(archive_path))
    return hdr
def get_scanid_from_header(archive_path, scanid_field):
    """Get the scan id from the dicom header of an archive.

    Returns None if the headers can't be read, the field is missing, or
    the field's value is not a well-formed scan ID.
    """
    header = get_archive_headers(archive_path)
    if not header:
        # Return None, not False, so every failure path has the same type
        # (the old code returned False only on this branch).
        return None

    if scanid_field not in header:
        logger.error("{} field is not in {} dicom headers"
                     .format(scanid_field, archive_path))
        return None

    scanid = str(header.get(scanid_field))

    if datman.scanid.is_scanid(scanid):
        logger.debug("{}: Using scan ID from dicom field {} = {}."
                     .format(archive_path, scanid_field, scanid))
        return scanid

    logger.warning("{}: {} (header {}) not valid scan ID"
                   .format(archive_path, scanid, scanid_field))
    return None
def validate_headers(archive_path, lookupinfo, scanid_field):
    """
    Validates an exam archive against the lookup table.

    Checks that the value of every dicom_* column in the lookup table row
    matches the corresponding dicom header field of the archive. Returns
    True when all checked fields match, False otherwise.
    """
    header = get_archive_headers(archive_path)
    if not header:
        return False

    columns = lookupinfo.columns.values.tolist()
    dicom_cols = [c for c in columns if c.startswith("dicom_")]

    for c in dicom_cols:
        # Split only on the first underscore so dicom field names that
        # themselves contain underscores are preserved (the old
        # split("_")[1] truncated them).
        f = c.split("_", 1)[1]

        if f not in header:
            # Report the field actually being checked; the old message
            # mistakenly named scanid_field here.
            logger.error("{} field is not in {} dicom headers"
                         .format(f, archive_path))
            return False

        actual = str(header.get(f))
        expected = str(lookupinfo[c].tolist()[0])

        if actual != expected:
            logger.error("{}: dicom field '{}' = '{}', expected '{}'"
                         .format(archive_path, f, actual, expected))
            return False

    return True
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
| 31.317073 | 79 | 0.628115 |
import glob
import logging
import os
import sys
from docopt import docopt
import pandas as pd
import datman.config
import datman.scanid
import datman.utils
logger = logging.getLogger(os.path.basename(__file__))
already_linked = {}
lookup = None
DRYRUN = None
def main():
    """Entry point: parse CLI args, configure logging, link zip archives.

    Loads the study configuration and the scans.csv lookup table, then
    creates symlinks in the dicom folder for each archive found (or for
    the archives named on the command line).
    """
    global already_linked
    global lookup
    global DRYRUN
    # Parse command-line options from the module docstring.
    arguments = docopt(__doc__)
    verbose = arguments["--verbose"]
    debug = arguments["--debug"]
    DRYRUN = arguments["--dry-run"]
    quiet = arguments["--quiet"]
    study = arguments["<study>"]
    lookup_path = arguments["--lookup"]
    scanid_field = arguments["--scanid-field"]
    zipfile = arguments["<zipfile>"]
    # Console logging; verbosity flags adjust both logger and handler levels.
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.WARN)
    logger.setLevel(logging.WARN)
    if quiet:
        logger.setLevel(logging.ERROR)
        ch.setLevel(logging.ERROR)
    if verbose:
        logger.setLevel(logging.INFO)
        ch.setLevel(logging.INFO)
    if debug:
        logger.setLevel(logging.DEBUG)
        ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(asctime)s - %(name)s - {study} - "
                                  "%(levelname)s - %(message)s".format(
                                      study=study))
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    cfg = datman.config.config(study=study)
    # Default lookup table lives in the study's metadata folder.
    if not lookup_path:
        lookup_path = os.path.join(cfg.get_path("meta"), "scans.csv")
    dicom_path = cfg.get_path("dicom")
    zips_path = cfg.get_path("zips")
    if not os.path.isdir(dicom_path):
        logger.warning("Dicom folder {} doesnt exist, creating it.".format(
            dicom_path))
        try:
            os.makedirs(dicom_path)
        except IOError:
            logger.error("Failed to create dicom path {}".format(dicom_path))
            return
    if not os.path.isdir(zips_path):
        logger.error("Zips path {} doesnt exist".format(zips_path))
        return
    try:
        # Whitespace-delimited table; expected to map source_name -> target_name.
        lookup = pd.read_csv(lookup_path, sep="\s+", dtype=str)
    except IOError:
        logger.error("Lookup file {} not found".format(lookup_path))
        return
    # Map of resolved archive path -> existing symlink, to skip re-linking.
    already_linked = {os.path.realpath(f): f
                      for f
                      in glob.glob(os.path.join(dicom_path, "*"))
                      if os.path.islink(f)}
    if zipfile:
        if isinstance(zipfile, str):
            zipfile = [zipfile]
        archives = [os.path.join(zips_path, zip) for zip in zipfile]
    else:
        # No archives named on the command line: process every .zip found.
        archives = [os.path.join(zips_path, archive)
                    for archive
                    in os.listdir(zips_path)
                    if os.path.splitext(archive)[1] == ".zip"]
    logger.info("Found {} archives".format(len(archives)))
    for archive in archives:
        link_archive(archive, dicom_path, scanid_field, cfg)
def link_archive(archive_path, dicom_path, scanid_field, config):
    """Create a symlink in ``dicom_path`` pointing at one zip archive.

    The link name is the validated scan ID, taken from the lookup table
    or, failing that, from the DICOM header field. Archives that are
    already linked, marked "<ignore>" in the table, or have no
    resolvable scan ID are skipped.
    """
    if not os.path.isfile(archive_path):
        logger.error("Archive {} not found".format(archive_path))
        return
    try:
        linked_path = already_linked[os.path.realpath(archive_path)]
    except KeyError:
        linked_path = ""
    if linked_path:
        logger.info("{} already linked at {}".format(archive_path,
                                                     linked_path))
        return
    # Prefer the lookup table; it returns (scanid, matching rows) or None.
    scanid = get_scanid_from_lookup_table(archive_path)
    if scanid:
        # NOTE(review): lookupinfo is unpacked but never used afterwards.
        scanid, lookupinfo = scanid
    if scanid == "<ignore>":
        logger.info("Ignoring {}".format(archive_path))
        return
    # Fall back to reading the scan ID from the DICOM header field.
    if not scanid:
        scanid = get_scanid_from_header(archive_path, scanid_field)
    if not scanid:
        logger.error("Scanid not found for archive: {}".format(archive_path))
        return
    try:
        ident = datman.utils.validate_subject_id(scanid, config)
    except datman.scanid.ParseException as e:
        logger.error("Can't make link for {}. Reason: {}".format(
            archive_path, e))
        return
    scanid = str(ident)
    # do the linking
    target = os.path.join(dicom_path, scanid)
    target = target + datman.utils.get_extension(archive_path)
    if os.path.exists(target):
        logger.error("Target: {} already exists for archive: {}"
                     .format(target, archive_path))
        return
    # The symlink target is stored relative to dicom_path.
    relpath = os.path.relpath(archive_path, dicom_path)
    logger.info("Linking {} to {}".format(relpath, target))
    if not DRYRUN:
        os.symlink(relpath, target)
def get_scanid_from_lookup_table(archive_path):
    """Look up an archive's scan ID in the global lookup table.

    Returns a ``(scanid, matching_rows)`` tuple when the archive's source
    name appears in the table's ``source_name`` column, otherwise ``None``.
    """
    global lookup
    archive_name = os.path.basename(os.path.normpath(archive_path))
    extension = datman.utils.get_extension(archive_name)
    source_name = archive_name[:-len(extension)]

    matches = lookup[lookup["source_name"] == source_name]
    if len(matches) == 0:
        logger.debug("{} not found in source_name column."
                     .format(source_name))
        return None

    return (matches["target_name"].tolist()[0], matches)
def get_archive_headers(archive_path):
    """Read DICOM headers from the first series of an exam archive.

    Returns the first series' header object, or a falsy value (``None``,
    or an empty mapping if extraction fails partway) when no DICOMs
    could be read.
    """
    # get some DICOM headers from the archive
    header = None
    try:
        # Mapping of series -> header object; only the first series is read.
        header = datman.utils.get_archive_headers(archive_path,
                                                  stop_after_first=True)
        # Keep only the first series' header.
        header = list(header.values())[0]
    except Exception:
        logger.warning("Archive: {} contains no DICOMs".format(archive_path))
    return header
def get_scanid_from_header(archive_path, scanid_field):
    """Extract and validate a scan ID from an archive's DICOM header.

    Returns the scan ID string when the header field holds a valid ID,
    ``False`` when no header could be read at all, and ``None`` when the
    field is absent or its value is not a proper scan ID.
    """
    header = get_archive_headers(archive_path)
    if not header:
        return False

    if scanid_field not in header:
        logger.error("{} field is not in {} dicom headers"
                     .format(scanid_field, archive_path))
        return None

    candidate = str(header.get(scanid_field))
    if not datman.scanid.is_scanid(candidate):
        logger.warning("{}: {} (header {}) not valid scan ID"
                       .format(archive_path, candidate, scanid_field))
        return None

    logger.debug("{}: Using scan ID from dicom field {} = {}."
                 .format(archive_path, scanid_field, candidate))
    return candidate
def validate_headers(archive_path, lookupinfo, scanid_field):
    """
    Validates an exam archive against the lookup table.

    Checks that every ``dicom_*`` column in the lookup table matches the
    corresponding field in the archive's DICOM header. Returns True when
    all expected fields are present and match, False otherwise.
    """
    header = get_archive_headers(archive_path)
    if not header:
        return False
    columns = lookupinfo.columns.values.tolist()
    dicom_cols = [c for c in columns if c.startswith("dicom_")]
    for c in dicom_cols:
        # NOTE(review): split("_")[1] keeps only the first token after
        # "dicom_"; a field name containing "_" would be truncated — confirm.
        f = c.split("_")[1]
        if f not in header:
            # Bug fix: report the dicom field that is actually missing
            # (previously reported scanid_field, which is unrelated here).
            logger.error("{} field is not in {} dicom headers"
                         .format(f, archive_path))
            return False
        actual = str(header.get(f))
        expected = str(lookupinfo[c].tolist()[0])
        if actual != expected:
            logger.error("{}: dicom field '{}' = '{}', expected '{}'"
                         .format(archive_path, f, actual, expected))
            return False
    return True
# Entry point when executed as a script.
if __name__ == "__main__":
    main()
| true | true |
f73e2cae7ae81dc17b787e8465ce0f4ee2ea9092 | 9,839 | py | Python | test/dynamics/solvers/test_solver_classes.py | haggaila/qiskit-dynamics | fd20314e2b591c35323782bc429d9f928fdb9a12 | [
"Apache-2.0"
] | 1 | 2022-01-21T01:47:40.000Z | 2022-01-21T01:47:40.000Z | test/dynamics/solvers/test_solver_classes.py | haggaila/qiskit-dynamics | fd20314e2b591c35323782bc429d9f928fdb9a12 | [
"Apache-2.0"
] | null | null | null | test/dynamics/solvers/test_solver_classes.py | haggaila/qiskit-dynamics | fd20314e2b591c35323782bc429d9f928fdb9a12 | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""
Tests for solver classes module.
"""
import numpy as np
from qiskit import QiskitError
from qiskit.quantum_info import Operator, Statevector, SuperOp, DensityMatrix
from qiskit_dynamics import Solver
from qiskit_dynamics.signals import Signal
from ..common import QiskitDynamicsTestCase, TestJaxBase
class TestSolverExceptions(QiskitDynamicsTestCase):
    """Tests for Solver exception raising based on input types."""

    def setUp(self):
        X = Operator.from_label("X")
        self.ham_solver = Solver(hamiltonian_operators=[X], hamiltonian_signals=[1.0])

        self.lindblad_solver = Solver(
            hamiltonian_operators=[X], hamiltonian_signals=[1.0], dissipator_operators=[X]
        )

        self.vec_lindblad_solver = Solver(
            hamiltonian_operators=[X],
            hamiltonian_signals=[1.0],
            dissipator_operators=[X],
            evaluation_mode="dense_vectorized",
        )

    def _assert_shape_mismatch(self, solver, y0):
        """Assert that solving with the given y0 raises a shape-mismatch error."""
        with self.assertRaises(QiskitError) as qe:
            solver.solve([0.0, 1.0], y0)
        self.assertIn("Shape mismatch", str(qe.exception))

    def test_hamiltonian_shape_error(self):
        """Invalid state shapes for a Hamiltonian model raise QiskitError."""
        self._assert_shape_mismatch(self.ham_solver, np.array([1.0, 0.0, 0.0]))
        self._assert_shape_mismatch(self.ham_solver, np.array([[[1.0, 0.0, 0.0]]]))
        self._assert_shape_mismatch(self.ham_solver, Statevector(np.array([1.0, 0.0, 0.0])))

    def test_lindblad_shape_error(self):
        """Invalid state shapes for a Lindblad model raise QiskitError."""
        self._assert_shape_mismatch(self.lindblad_solver, np.array([1.0, 0.0, 0.0]))
        self._assert_shape_mismatch(self.lindblad_solver, np.array([[[1.0, 0.0, 0.0]]]))
        self._assert_shape_mismatch(
            self.lindblad_solver, Statevector(np.array([1.0, 0.0, 0.0]))
        )

    def test_vectorized_lindblad_shape_error(self):
        """Invalid state shapes for a vectorized Lindblad model raise QiskitError."""
        self._assert_shape_mismatch(
            self.vec_lindblad_solver, np.array([[1.0, 0.0], [0.0, 1.0]])
        )
        self._assert_shape_mismatch(
            self.vec_lindblad_solver, DensityMatrix(np.array([1.0, 0.0, 0.0]))
        )
        self._assert_shape_mismatch(
            self.vec_lindblad_solver, Statevector(np.array([1.0, 0.0, 0.0]))
        )

    def test_non_vectorized_SuperOp_error(self):
        """SuperOp simulation on a non-vectorized Lindblad model is rejected."""
        with self.assertRaises(QiskitError) as qe:
            self.lindblad_solver.solve([0.0, 1.0], SuperOp(np.eye(4)))
        self.assertIn("Simulating SuperOp", str(qe.exception))
class TestSolver(QiskitDynamicsTestCase):
    """Tests for Solver class."""

    def setUp(self):
        """Build a family of simple single-qubit solvers."""
        X = 2 * np.pi * Operator.from_label("X") / 2
        Z = 2 * np.pi * Operator.from_label("Z") / 2

        self.ham_solver = Solver(
            hamiltonian_operators=[X],
            hamiltonian_signals=[Signal(1.0, 5.0)],
            drift=5 * Z,
            rotating_frame=5 * Z,
        )

        self.rwa_ham_solver = Solver(
            hamiltonian_operators=[X],
            hamiltonian_signals=[Signal(1.0, 5.0)],
            drift=5 * Z,
            rotating_frame=5 * Z,
            rwa_cutoff_freq=2 * 5.0,
        )

        self.lindblad_solver = Solver(
            hamiltonian_operators=[X],
            hamiltonian_signals=[Signal(1.0, 5.0)],
            dissipator_operators=[0.01 * X],
            drift=5 * Z,
            rotating_frame=5 * Z,
        )

        self.vec_lindblad_solver = Solver(
            hamiltonian_operators=[X],
            hamiltonian_signals=[Signal(1.0, 5.0)],
            dissipator_operators=[0.01 * X],
            drift=5 * Z,
            rotating_frame=5 * Z,
            evaluation_mode="dense_vectorized",
        )

        # Lindblad solver with no dissipation, for consistency checks.
        self.vec_lindblad_solver_no_diss = Solver(
            hamiltonian_operators=[X],
            hamiltonian_signals=[Signal(1.0, 5.0)],
            dissipator_operators=[0.0 * X],
            drift=5 * Z,
            rotating_frame=5 * Z,
            evaluation_mode="dense_vectorized",
        )

        self.method = "DOP853"

    def test_lindblad_solve_statevector(self):
        """Statevector input to a Lindblad solve is converted to DensityMatrix."""
        sol = self.lindblad_solver.solve(
            [0.0, 1.0], y0=Statevector([0.0, 1.0]), method=self.method
        )
        final = sol.y[-1]
        self.assertTrue(isinstance(final, DensityMatrix))
        self.assertTrue(0.99 < final.data[0, 0] < 0.999)

    def test_vec_lindblad_statevector(self):
        """Vectorized and standard Lindblad solves agree on a Statevector input."""
        vec_sol = self.vec_lindblad_solver.solve(
            [0.0, 1.0], y0=Statevector([0.0, 1.0]), method=self.method
        )
        std_sol = self.lindblad_solver.solve(
            [0.0, 1.0], y0=Statevector([0.0, 1.0]), method=self.method
        )
        self.assertTrue(isinstance(vec_sol.y[-1], DensityMatrix))
        self.assertAllClose(vec_sol.y[-1].data, std_sol.y[-1].data)

    def test_array_vectorized_lindblad(self):
        """Lindblad solver handles a batch of density matrices."""
        batch_y0 = np.array([[[0.0, 0.0], [0.0, 1.0]], [[1.0, 0.0], [0.0, 0.0]]])
        sol = self.lindblad_solver.solve([0.0, 1.0], y0=batch_y0, method=self.method)
        final = sol.y[-1]
        self.assertTrue(0.99 < final[0, 0, 0] < 0.999)
        self.assertTrue(0.99 < final[1, 1, 1] < 0.999)

    def test_rwa_hamiltonian(self):
        """Pi pulse with RWA gives perfect inversion."""
        sol = self.rwa_ham_solver.solve(
            [0.0, 1.0], y0=np.array([0.0, 1.0]), atol=1e-10, rtol=1e-10, method=self.method
        )
        self.assertTrue(np.abs(sol.y[-1][0]) > (1 - 1e-8))

    def test_hamiltonian_DensityMatrix(self):
        """Hamiltonian-based density matrix simulation conjugates correctly."""
        sol = self.ham_solver.solve(
            [0.0, 1.0],
            y0=DensityMatrix(np.array([0.0, 1.0])),
            atol=1e-10,
            rtol=1e-10,
            method=self.method,
        )
        final = sol.y[-1]
        self.assertTrue(isinstance(final, DensityMatrix))
        self.assertTrue(np.abs(final.data[0, 0]) > 0.999)

    def test_hamiltonian_SuperOp(self):
        """Hamiltonian-based SuperOp simulation yields the X-conjugation channel."""
        sol = self.rwa_ham_solver.solve(
            [0.0, 1.0], y0=SuperOp(np.eye(4)), atol=1e-10, rtol=1e-10, method=self.method
        )
        self.assertTrue(isinstance(sol.y[-1], SuperOp))
        X = np.array([[0.0, 1.0], [1.0, 0.0]])
        self.assertAllClose(sol.y[-1].data, np.kron(X, X))

    def test_hamiltonian_lindblad_SuperOp_consistency(self):
        """Hamiltonian and dissipation-free Lindblad SuperOp solves agree."""
        ham_sol = self.ham_solver.solve(
            [0.0, 0.432], y0=SuperOp(np.eye(4)), atol=1e-10, rtol=1e-10, method=self.method
        )
        lindblad_sol = self.vec_lindblad_solver_no_diss.solve(
            [0.0, 0.432], y0=SuperOp(np.eye(4)), atol=1e-10, rtol=1e-10
        )
        self.assertAllClose(ham_sol.y[-1].data, lindblad_sol.y[-1].data)
class TestSolverJax(TestSolver, TestJaxBase):
    """JAX version of TestSolver."""

    def setUp(self):
        """Use the jax_odeint method to speed up the inherited tests."""
        super().setUp()
        self.method = "jax_odeint"

    def test_jit_solve(self):
        """Jitted signal-setting + solve matches the un-jitted result."""

        def run_sim(amp):
            solver = self.ham_solver.copy()
            solver.signals = [Signal(lambda t: amp, 5.0)]
            return solver.solve(
                np.array([0.0, 1.0]), y0=np.array([0.0, 1.0]), method=self.method
            ).y[-1]

        jitted = self.jit_wrap(run_sim)
        self.assertAllClose(jitted(2.0), run_sim(2.0))

    def test_jit_grad_solve(self):
        """Gradient of a jitted signal-setting + solve runs without error."""

        def run_sim(amp):
            solver = self.lindblad_solver.copy()
            solver.signals = [[Signal(lambda t: amp, 5.0)], [1.0]]
            return solver.solve(
                [0.0, 1.0], y0=np.array([[0.0, 1.0], [0.0, 1.0]]), method=self.method
            ).y[-1]

        jit_grad = self.jit_grad_wrap(run_sim)
        jit_grad(1.0)
| 38.584314 | 96 | 0.6087 |
import numpy as np
from qiskit import QiskitError
from qiskit.quantum_info import Operator, Statevector, SuperOp, DensityMatrix
from qiskit_dynamics import Solver
from qiskit_dynamics.signals import Signal
from ..common import QiskitDynamicsTestCase, TestJaxBase
class TestSolverExceptions(QiskitDynamicsTestCase):
    """Tests for Solver exception raising based on input types."""

    def setUp(self):
        X = Operator.from_label("X")
        self.ham_solver = Solver(hamiltonian_operators=[X], hamiltonian_signals=[1.0])

        self.lindblad_solver = Solver(
            hamiltonian_operators=[X], hamiltonian_signals=[1.0], dissipator_operators=[X]
        )

        self.vec_lindblad_solver = Solver(
            hamiltonian_operators=[X],
            hamiltonian_signals=[1.0],
            dissipator_operators=[X],
            evaluation_mode="dense_vectorized",
        )

    def _assert_shape_mismatch(self, solver, y0):
        """Assert that solving with the given y0 raises a shape-mismatch error."""
        with self.assertRaises(QiskitError) as qe:
            solver.solve([0.0, 1.0], y0)
        self.assertIn("Shape mismatch", str(qe.exception))

    def test_hamiltonian_shape_error(self):
        """Invalid state shapes for a Hamiltonian model raise QiskitError."""
        self._assert_shape_mismatch(self.ham_solver, np.array([1.0, 0.0, 0.0]))
        self._assert_shape_mismatch(self.ham_solver, np.array([[[1.0, 0.0, 0.0]]]))
        self._assert_shape_mismatch(self.ham_solver, Statevector(np.array([1.0, 0.0, 0.0])))

    def test_lindblad_shape_error(self):
        """Invalid state shapes for a Lindblad model raise QiskitError."""
        self._assert_shape_mismatch(self.lindblad_solver, np.array([1.0, 0.0, 0.0]))
        self._assert_shape_mismatch(self.lindblad_solver, np.array([[[1.0, 0.0, 0.0]]]))
        self._assert_shape_mismatch(
            self.lindblad_solver, Statevector(np.array([1.0, 0.0, 0.0]))
        )

    def test_vectorized_lindblad_shape_error(self):
        """Invalid state shapes for a vectorized Lindblad model raise QiskitError."""
        self._assert_shape_mismatch(
            self.vec_lindblad_solver, np.array([[1.0, 0.0], [0.0, 1.0]])
        )
        self._assert_shape_mismatch(
            self.vec_lindblad_solver, DensityMatrix(np.array([1.0, 0.0, 0.0]))
        )
        self._assert_shape_mismatch(
            self.vec_lindblad_solver, Statevector(np.array([1.0, 0.0, 0.0]))
        )

    def test_non_vectorized_SuperOp_error(self):
        """SuperOp simulation on a non-vectorized Lindblad model is rejected."""
        with self.assertRaises(QiskitError) as qe:
            self.lindblad_solver.solve([0.0, 1.0], SuperOp(np.eye(4)))
        self.assertIn("Simulating SuperOp", str(qe.exception))
class TestSolver(QiskitDynamicsTestCase):
    """Tests for Solver class."""

    def setUp(self):
        """Build a family of simple single-qubit solvers."""
        X = 2 * np.pi * Operator.from_label("X") / 2
        Z = 2 * np.pi * Operator.from_label("Z") / 2

        self.ham_solver = Solver(
            hamiltonian_operators=[X],
            hamiltonian_signals=[Signal(1.0, 5.0)],
            drift=5 * Z,
            rotating_frame=5 * Z,
        )

        self.rwa_ham_solver = Solver(
            hamiltonian_operators=[X],
            hamiltonian_signals=[Signal(1.0, 5.0)],
            drift=5 * Z,
            rotating_frame=5 * Z,
            rwa_cutoff_freq=2 * 5.0,
        )

        self.lindblad_solver = Solver(
            hamiltonian_operators=[X],
            hamiltonian_signals=[Signal(1.0, 5.0)],
            dissipator_operators=[0.01 * X],
            drift=5 * Z,
            rotating_frame=5 * Z,
        )

        self.vec_lindblad_solver = Solver(
            hamiltonian_operators=[X],
            hamiltonian_signals=[Signal(1.0, 5.0)],
            dissipator_operators=[0.01 * X],
            drift=5 * Z,
            rotating_frame=5 * Z,
            evaluation_mode="dense_vectorized",
        )

        # Lindblad solver with no dissipation, for consistency checks.
        self.vec_lindblad_solver_no_diss = Solver(
            hamiltonian_operators=[X],
            hamiltonian_signals=[Signal(1.0, 5.0)],
            dissipator_operators=[0.0 * X],
            drift=5 * Z,
            rotating_frame=5 * Z,
            evaluation_mode="dense_vectorized",
        )

        self.method = "DOP853"

    def test_lindblad_solve_statevector(self):
        """Statevector input to a Lindblad solve is converted to DensityMatrix."""
        sol = self.lindblad_solver.solve(
            [0.0, 1.0], y0=Statevector([0.0, 1.0]), method=self.method
        )
        final = sol.y[-1]
        self.assertTrue(isinstance(final, DensityMatrix))
        self.assertTrue(0.99 < final.data[0, 0] < 0.999)

    def test_vec_lindblad_statevector(self):
        """Vectorized and standard Lindblad solves agree on a Statevector input."""
        vec_sol = self.vec_lindblad_solver.solve(
            [0.0, 1.0], y0=Statevector([0.0, 1.0]), method=self.method
        )
        std_sol = self.lindblad_solver.solve(
            [0.0, 1.0], y0=Statevector([0.0, 1.0]), method=self.method
        )
        self.assertTrue(isinstance(vec_sol.y[-1], DensityMatrix))
        self.assertAllClose(vec_sol.y[-1].data, std_sol.y[-1].data)

    def test_array_vectorized_lindblad(self):
        """Lindblad solver handles a batch of density matrices."""
        batch_y0 = np.array([[[0.0, 0.0], [0.0, 1.0]], [[1.0, 0.0], [0.0, 0.0]]])
        sol = self.lindblad_solver.solve([0.0, 1.0], y0=batch_y0, method=self.method)
        final = sol.y[-1]
        self.assertTrue(0.99 < final[0, 0, 0] < 0.999)
        self.assertTrue(0.99 < final[1, 1, 1] < 0.999)

    def test_rwa_hamiltonian(self):
        """Pi pulse with RWA gives perfect inversion."""
        sol = self.rwa_ham_solver.solve(
            [0.0, 1.0], y0=np.array([0.0, 1.0]), atol=1e-10, rtol=1e-10, method=self.method
        )
        self.assertTrue(np.abs(sol.y[-1][0]) > (1 - 1e-8))

    def test_hamiltonian_DensityMatrix(self):
        """Hamiltonian-based density matrix simulation conjugates correctly."""
        sol = self.ham_solver.solve(
            [0.0, 1.0],
            y0=DensityMatrix(np.array([0.0, 1.0])),
            atol=1e-10,
            rtol=1e-10,
            method=self.method,
        )
        final = sol.y[-1]
        self.assertTrue(isinstance(final, DensityMatrix))
        self.assertTrue(np.abs(final.data[0, 0]) > 0.999)

    def test_hamiltonian_SuperOp(self):
        """Hamiltonian-based SuperOp simulation yields the X-conjugation channel."""
        sol = self.rwa_ham_solver.solve(
            [0.0, 1.0], y0=SuperOp(np.eye(4)), atol=1e-10, rtol=1e-10, method=self.method
        )
        self.assertTrue(isinstance(sol.y[-1], SuperOp))
        X = np.array([[0.0, 1.0], [1.0, 0.0]])
        self.assertAllClose(sol.y[-1].data, np.kron(X, X))

    def test_hamiltonian_lindblad_SuperOp_consistency(self):
        """Hamiltonian and dissipation-free Lindblad SuperOp solves agree."""
        ham_sol = self.ham_solver.solve(
            [0.0, 0.432], y0=SuperOp(np.eye(4)), atol=1e-10, rtol=1e-10, method=self.method
        )
        lindblad_sol = self.vec_lindblad_solver_no_diss.solve(
            [0.0, 0.432], y0=SuperOp(np.eye(4)), atol=1e-10, rtol=1e-10
        )
        self.assertAllClose(ham_sol.y[-1].data, lindblad_sol.y[-1].data)
class TestSolverJax(TestSolver, TestJaxBase):
    """JAX version of TestSolver."""

    def setUp(self):
        """Use the jax_odeint method to speed up the inherited tests."""
        super().setUp()
        self.method = "jax_odeint"

    def test_jit_solve(self):
        """Jitted signal-setting + solve matches the un-jitted result."""

        def run_sim(amp):
            solver = self.ham_solver.copy()
            solver.signals = [Signal(lambda t: amp, 5.0)]
            return solver.solve(
                np.array([0.0, 1.0]), y0=np.array([0.0, 1.0]), method=self.method
            ).y[-1]

        jitted = self.jit_wrap(run_sim)
        self.assertAllClose(jitted(2.0), run_sim(2.0))

    def test_jit_grad_solve(self):
        """Gradient of a jitted signal-setting + solve runs without error."""

        def run_sim(amp):
            solver = self.lindblad_solver.copy()
            solver.signals = [[Signal(lambda t: amp, 5.0)], [1.0]]
            return solver.solve(
                [0.0, 1.0], y0=np.array([[0.0, 1.0], [0.0, 1.0]]), method=self.method
            ).y[-1]

        jit_grad = self.jit_grad_wrap(run_sim)
        jit_grad(1.0)
| true | true |
f73e2eac4753b15fca94cc1f8bafaf590c205709 | 1,801 | py | Python | src/sentry/utils/auth.py | erhuabushuo/sentry | 8b3bad10155aaacfdff80910e5972e64304e880c | [
"BSD-3-Clause"
] | null | null | null | src/sentry/utils/auth.py | erhuabushuo/sentry | 8b3bad10155aaacfdff80910e5972e64304e880c | [
"BSD-3-Clause"
] | null | null | null | src/sentry/utils/auth.py | erhuabushuo/sentry | 8b3bad10155aaacfdff80910e5972e64304e880c | [
"BSD-3-Clause"
] | null | null | null | """
sentry.utils.auth
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.conf import settings
from django.contrib.auth.backends import ModelBackend
from sentry.models import User
def parse_auth_header(header):
    """Parse an auth header of the form ``Scheme k1=v1, k2=v2`` into a dict.

    Everything before the first space (the scheme token) is discarded;
    the remainder is split on commas into ``key=value`` pairs.
    """
    pairs = header.split(' ', 1)[1].split(',')
    return dict(pair.strip().split('=') for pair in pairs)
def get_auth_providers():
    """Return the names of auth providers whose settings are all configured.

    A provider is included only when every setting name it lists in
    ``settings.AUTH_PROVIDERS`` resolves to a truthy value.
    """
    providers = []
    for key, cfg_names in settings.AUTH_PROVIDERS.iteritems():
        if all(getattr(settings, c, None) for c in cfg_names):
            providers.append(key)
    return providers
def find_users(username, with_valid_password=True):
    """Return a list of users matching *username*, falling back to email.

    Username match is case-insensitive and tried first; if it fails and
    the input looks like an email address, all users with that email are
    returned (email is not guaranteed unique). Returns ``None`` when
    nothing matches.
    """
    qs = User.objects
    if with_valid_password:
        # '!' is the sentinel for an unusable password.
        qs = qs.exclude(password='!')
    try:
        # First, assume username is an iexact match for username
        return [qs.get(username__iexact=username)]
    except User.DoesNotExist:
        if '@' in username:
            return list(qs.filter(email__iexact=username))
        return None
class EmailAuthBackend(ModelBackend):
    """
    Authenticate against django.contrib.auth.models.User.

    Supports authenticating via an email address or a username.
    """

    def authenticate(self, username=None, password=None):
        candidates = find_users(username) or ()
        for user in candidates:
            try:
                if user.password and user.check_password(password):
                    return user
            except ValueError:
                # Unusable/corrupt password hash; try the next candidate.
                continue
        return None
| 27.707692 | 88 | 0.632982 | from __future__ import absolute_import
from django.conf import settings
from django.contrib.auth.backends import ModelBackend
from sentry.models import User
def parse_auth_header(header):
    """Parse an auth header of the form ``Scheme k1=v1, k2=v2`` into a dict.

    Everything before the first space (the scheme token) is discarded;
    the remainder is split on commas into ``key=value`` pairs.
    """
    pairs = header.split(' ', 1)[1].split(',')
    return dict(pair.strip().split('=') for pair in pairs)
def get_auth_providers():
    """Return the names of auth providers whose settings are all configured.

    A provider is included only when every setting name it lists in
    ``settings.AUTH_PROVIDERS`` resolves to a truthy value.
    """
    providers = []
    for key, cfg_names in settings.AUTH_PROVIDERS.iteritems():
        if all(getattr(settings, c, None) for c in cfg_names):
            providers.append(key)
    return providers
def find_users(username, with_valid_password=True):
    """Return a list of users matching *username*, falling back to email.

    Username match is case-insensitive and tried first; if it fails and
    the input looks like an email address, all users with that email are
    returned (email is not guaranteed unique). Returns ``None`` when
    nothing matches.
    """
    qs = User.objects
    if with_valid_password:
        # '!' is the sentinel for an unusable password.
        qs = qs.exclude(password='!')
    try:
        return [qs.get(username__iexact=username)]
    except User.DoesNotExist:
        if '@' in username:
            # email isn't guaranteed unique
            return list(qs.filter(email__iexact=username))
        return None
class EmailAuthBackend(ModelBackend):
    """
    Authenticate against django.contrib.auth.models.User.

    Supports authenticating via an email address or a username.
    """

    def authenticate(self, username=None, password=None):
        candidates = find_users(username) or ()
        for user in candidates:
            try:
                if user.password and user.check_password(password):
                    return user
            except ValueError:
                # Unusable/corrupt password hash; try the next candidate.
                continue
        return None
| true | true |
f73e3303fffd49fcb2b65f8ed458e93f7a53a71b | 12,680 | py | Python | python_modules/dagster/dagster/core/definitions/solid_container.py | mitodl/dagster | c94cd8d0f5f67722790e8a176228aa4bdcaa0068 | [
"Apache-2.0"
] | 1 | 2021-04-30T00:19:20.000Z | 2021-04-30T00:19:20.000Z | python_modules/dagster/dagster/core/definitions/solid_container.py | mitodl/dagster | c94cd8d0f5f67722790e8a176228aa4bdcaa0068 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/definitions/solid_container.py | mitodl/dagster | c94cd8d0f5f67722790e8a176228aa4bdcaa0068 | [
"Apache-2.0"
] | null | null | null | from abc import ABCMeta, abstractmethod, abstractproperty
from collections import defaultdict
import six
from dagster import check
from dagster.core.errors import DagsterInvalidDefinitionError
from dagster.core.types.dagster_type import DagsterTypeKind
from .dependency import DependencyStructure, IDependencyDefinition, Solid, SolidInvocation
class IContainSolids(six.with_metaclass(ABCMeta)):  # pylint: disable=no-init
    """Interface for definitions that contain a graph of solids."""

    @abstractproperty
    def solids(self):
        """List[Solid]: Top-level solids in the container."""

    @abstractproperty
    def dependency_structure(self):
        """DependencyStructure: The dependencies between top-level solids in the container."""

    @abstractmethod
    def solid_named(self, name):
        """Return the (top-level) solid with a given name.

        Args:
            name (str): The name of the top level solid.

        Returns:
            Solid: The solid with the given name
        """
def validate_dependency_dict(dependencies):
    """Validate the shape of a pipeline ``dependencies`` argument.

    Expects ``dict[Union[str, SolidInvocation], dict[str, IDependencyDefinition]]``.
    Returns ``{}`` when ``dependencies`` is None, otherwise returns the input
    unchanged after validation.

    Raises:
        DagsterInvalidDefinitionError: if any level of the structure has the
            wrong type, with a message describing where the mismatch occurred.
    """
    prelude = (
        'The expected type for "dependencies" is dict[Union[str, SolidInvocation], dict[str, '
        "DependencyDefinition]]. "
    )

    if dependencies is None:
        return {}

    if not isinstance(dependencies, dict):
        raise DagsterInvalidDefinitionError(
            prelude
            + "Received value {val} of type {type} at the top level.".format(
                val=dependencies, type=type(dependencies)
            )
        )

    for key, dep_dict in dependencies.items():
        # Top-level keys name the solid (possibly aliased via SolidInvocation).
        if not isinstance(key, six.string_types + (SolidInvocation,)):
            raise DagsterInvalidDefinitionError(
                prelude + "Expected str or SolidInvocation key in the top level dict. "
                "Received value {val} of type {type}".format(val=key, type=type(key))
            )
        if not isinstance(dep_dict, dict):
            if isinstance(dep_dict, IDependencyDefinition):
                raise DagsterInvalidDefinitionError(
                    prelude
                    + "Received a IDependencyDefinition one layer too high under key {key}. "
                    "The DependencyDefinition should be moved in to a dict keyed on "
                    "input name.".format(key=key)
                )
            else:
                raise DagsterInvalidDefinitionError(
                    prelude + "Under key {key} received value {val} of type {type}. "
                    "Expected dict[str, DependencyDefinition]".format(
                        key=key, val=dep_dict, type=type(dep_dict)
                    )
                )

        for input_key, dep in dep_dict.items():
            if not isinstance(input_key, six.string_types):
                # Fixed typo in error message: "non-sting" -> "non-string".
                raise DagsterInvalidDefinitionError(
                    prelude
                    + "Received non-string key in the inner dict for key {key}.".format(key=key)
                )
            if not isinstance(dep, IDependencyDefinition):
                raise DagsterInvalidDefinitionError(
                    prelude
                    + 'Expected IDependencyDefinition for solid "{key}" input "{input_key}". '
                    "Received value {val} of type {type}.".format(
                        key=key, input_key=input_key, val=dep, type=type(dep)
                    )
                )

    return dependencies
def create_execution_structure(solid_defs, dependencies_dict, graph_definition):
    """This builder takes the dependencies dictionary specified during creation of the
    PipelineDefinition object and builds (1) the execution structure and (2) a solid dependency
    dictionary.

    For example, for the following dependencies:

    dep_dict = {
            SolidInvocation('giver'): {},
            SolidInvocation('sleeper', alias='sleeper_1'): {
                'units': DependencyDefinition('giver', 'out_1')
            },
            SolidInvocation('sleeper', alias='sleeper_2'): {
                'units': DependencyDefinition('giver', 'out_2')
            },
            SolidInvocation('sleeper', alias='sleeper_3'): {
                'units': DependencyDefinition('giver', 'out_3')
            },
            SolidInvocation('sleeper', alias='sleeper_4'): {
                'units': DependencyDefinition('giver', 'out_4')
            },
            SolidInvocation('total'): {
                'in_1': DependencyDefinition('sleeper_1', 'total'),
                'in_2': DependencyDefinition('sleeper_2', 'total'),
                'in_3': DependencyDefinition('sleeper_3', 'total'),
                'in_4': DependencyDefinition('sleeper_4', 'total'),
            },
        },

    This will create:

    pipeline_solid_dict = {
            'giver': <dagster.core.definitions.dependency.Solid object>,
            'sleeper_1': <dagster.core.definitions.dependency.Solid object>,
            'sleeper_2': <dagster.core.definitions.dependency.Solid object>,
            'sleeper_3': <dagster.core.definitions.dependency.Solid object>,
            'sleeper_4': <dagster.core.definitions.dependency.Solid object>,
            'total': <dagster.core.definitions.dependency.Solid object>
        }

    as well as a dagster.core.definitions.dependency.DependencyStructure object.
    """
    # Imported locally to avoid a circular import at module load time.
    from .solid import NodeDefinition
    from .graph import GraphDefinition

    check.list_param(solid_defs, "solid_defs", of_type=NodeDefinition)
    check.dict_param(
        dependencies_dict,
        "dependencies_dict",
        key_type=six.string_types + (SolidInvocation,),
        value_type=dict,
    )
    # graph_definition is none in the context of a pipeline
    # NOTE(review): check.inst_param is still applied to it — confirm it
    # accepts None in the pipeline case.
    check.inst_param(graph_definition, "graph_definition", GraphDefinition)
    # Same as dep_dict but with SolidInvocation replaced by alias string
    aliased_dependencies_dict = {}
    # Keep track of solid name -> all aliases used and alias -> name
    name_to_aliases = defaultdict(set)
    alias_to_solid_instance = {}
    alias_to_name = {}
    for solid_key, input_dep_dict in dependencies_dict.items():
        # We allow deps of the form dependencies={'foo': DependencyDefinition('bar')}
        # Here, we replace 'foo' with SolidInvocation('foo')
        if not isinstance(solid_key, SolidInvocation):
            solid_key = SolidInvocation(solid_key)
        alias = solid_key.alias or solid_key.name
        name_to_aliases[solid_key.name].add(alias)
        alias_to_solid_instance[alias] = solid_key
        alias_to_name[alias] = solid_key.name
        aliased_dependencies_dict[alias] = input_dep_dict
    # Build alias -> Solid, then validate dependencies against it before
    # constructing the final DependencyStructure.
    pipeline_solid_dict = _build_pipeline_solid_dict(
        solid_defs, name_to_aliases, alias_to_solid_instance, graph_definition
    )
    _validate_dependencies(aliased_dependencies_dict, pipeline_solid_dict, alias_to_name)
    dependency_structure = DependencyStructure.from_definitions(
        pipeline_solid_dict, aliased_dependencies_dict
    )
    return dependency_structure, pipeline_solid_dict
def _build_pipeline_solid_dict(
    solid_defs, name_to_aliases, alias_to_solid_instance, graph_definition
):
    """Construct the alias -> Solid mapping for every use of every solid def.

    A solid definition never referenced in the dependency dict still gets a
    single entry under its own name.
    """
    solids_by_alias = {}
    for solid_def in solid_defs:
        aliases = name_to_aliases.get(solid_def.name, {solid_def.name})
        for alias in aliases:
            instance = alias_to_solid_instance.get(alias)
            solids_by_alias[alias] = Solid(
                name=alias,
                definition=solid_def,
                graph_definition=graph_definition,
                tags=instance.tags if instance else {},
                hook_defs=instance.hook_defs if instance else frozenset(),
            )
    return solids_by_alias
def _validate_dependencies(dependencies, solid_dict, alias_to_name):
    """Check that every dependency references real solids, inputs, and outputs.

    Args:
        dependencies: alias -> {input_name -> IDependencyDefinition} mapping.
        solid_dict: alias -> Solid mapping for the container.
        alias_to_name: alias -> underlying solid definition name.

    Raises:
        DagsterInvalidDefinitionError: on circular references, unknown solids,
            unknown inputs/outputs, or invalid fan-in.
    """
    for from_solid, dep_by_input in dependencies.items():
        for from_input, dep_def in dep_by_input.items():
            for dep in dep_def.get_definitions():

                if from_solid == dep.solid:
                    raise DagsterInvalidDefinitionError(
                        (
                            "Invalid dependencies: circular reference detected in solid "
                            '"{from_solid}" input "{from_input}"'
                        ).format(from_solid=from_solid, from_input=from_input)
                    )

                if from_solid not in solid_dict:
                    aliased_solid = alias_to_name.get(from_solid)
                    if aliased_solid == from_solid:
                        raise DagsterInvalidDefinitionError(
                            'Invalid dependencies: solid "{solid}" in dependency dictionary not '
                            "found in solid list".format(solid=from_solid)
                        )
                    else:
                        raise DagsterInvalidDefinitionError(
                            (
                                'Invalid dependencies: solid "{aliased_solid}" (aliased by '
                                '"{from_solid}" in dependency dictionary) not found in solid list'
                            ).format(aliased_solid=aliased_solid, from_solid=from_solid)
                        )
                if not solid_dict[from_solid].definition.has_input(from_input):
                    input_list = solid_dict[from_solid].definition.input_dict.keys()
                    raise DagsterInvalidDefinitionError(
                        'Invalid dependencies: solid "{from_solid}" does not have input '
                        '"{from_input}". '.format(from_solid=from_solid, from_input=from_input)
                        + "Available inputs: {input_list}".format(input_list=input_list)
                    )

                if dep.solid not in solid_dict:
                    raise DagsterInvalidDefinitionError(
                        'Invalid dependencies: solid "{dep.solid}" not found in solid list. '
                        'Listed as dependency for solid "{from_solid}" input "{from_input}" '.format(
                            dep=dep, from_solid=from_solid, from_input=from_input
                        )
                    )

                if not solid_dict[dep.solid].definition.has_output(dep.output):
                    # Bug fix: closing quote after {from_solid} was missing
                    # from this error message.
                    raise DagsterInvalidDefinitionError(
                        'Invalid dependencies: solid "{dep.solid}" does not have output '
                        '"{dep.output}". Listed as dependency for solid "{from_solid}" input '
                        '"{from_input}"'.format(
                            dep=dep, from_solid=from_solid, from_input=from_input
                        )
                    )

                input_def = solid_dict[from_solid].definition.input_def_named(from_input)
                output_def = solid_dict[dep.solid].definition.output_def_named(dep.output)

                if dep_def.is_multi() and not input_def.dagster_type.supports_fan_in:
                    raise DagsterInvalidDefinitionError(
                        f'Invalid dependencies: for solid "{dep.solid}" input "{input_def.name}", the '
                        f'DagsterType "{input_def.dagster_type.display_name}" does not support fanning in '
                        "(MultiDependencyDefinition). Use the List type, since fanning in will result in a list."
                    )

                _validate_input_output_pair(input_def, output_def, from_solid, dep)
def _validate_input_output_pair(input_def, output_def, from_solid, dep):
    """Reject a dependency in which a Nothing output feeds a value-expecting input.

    We deliberately stay permissive about general dagster-type mismatches; the
    only hard error raised here is an output of kind NOTHING (produces no value)
    wired into an input whose kind is not NOTHING (expects a value).
    """
    output_is_nothing = output_def.dagster_type.kind == DagsterTypeKind.NOTHING
    input_is_nothing = input_def.dagster_type.kind == DagsterTypeKind.NOTHING
    if not output_is_nothing or input_is_nothing:
        return
    # Only reached when the output produces no value but the input expects one.
    extra = " (which produces no value)" if output_is_nothing else ""
    raise DagsterInvalidDefinitionError(
        f'Input "{input_def.name}" to solid "{from_solid}" can not depend on the output '
        f'"{output_def.name}" from solid "{dep.solid}". '
        f'Input "{input_def.name}" expects a value of type '
        f'{input_def.dagster_type.display_name} and output "{output_def.name}" returns '
        f"type {output_def.dagster_type.display_name}{extra}."
    )
| 43.129252 | 113 | 0.602681 | from abc import ABCMeta, abstractmethod, abstractproperty
from collections import defaultdict
import six
from dagster import check
from dagster.core.errors import DagsterInvalidDefinitionError
from dagster.core.types.dagster_type import DagsterTypeKind
from .dependency import DependencyStructure, IDependencyDefinition, Solid, SolidInvocation
class IContainSolids(six.with_metaclass(ABCMeta)):
    """Abstract interface for definitions that contain solids (pipelines/graphs).

    NOTE(review): the abstract method bodies below were docstring-only and the
    docstrings had been stripped, leaving empty (invalid) bodies; the docstrings
    restore valid, documented bodies without changing any code.
    """
    @abstractproperty
    def solids(self):
        """Return the solids contained in this definition."""
    @abstractproperty
    def dependency_structure(self):
        """Return the DependencyStructure wiring the contained solids together."""
    @abstractmethod
    def solid_named(self, name):
        """Return the contained solid with the given name."""
def validate_dependency_dict(dependencies):
    """Validate the shape of a pipeline ``dependencies`` dict.

    Expected shape: dict[Union[str, SolidInvocation], dict[str, DependencyDefinition]].

    Args:
        dependencies: the user-supplied dependency dict, or None.

    Returns:
        ``{}`` when ``dependencies`` is None, otherwise the (unmodified) input dict.

    Raises:
        DagsterInvalidDefinitionError: if any level of the dict has the wrong type.

    Fix applied: the inner-key error message read "non-sting"; corrected to
    "non-string".
    """
    prelude = (
        'The expected type for "dependencies" is dict[Union[str, SolidInvocation], dict[str, '
        "DependencyDefinition]]. "
    )
    # A missing dependency dict is treated as "no dependencies".
    if dependencies is None:
        return {}
    if not isinstance(dependencies, dict):
        raise DagsterInvalidDefinitionError(
            prelude
            + "Received value {val} of type {type} at the top level.".format(
                val=dependencies, type=type(dependencies)
            )
        )
    for key, dep_dict in dependencies.items():
        # Top-level keys name the downstream solid (by name or invocation).
        if not (isinstance(key, six.string_types) or isinstance(key, SolidInvocation)):
            raise DagsterInvalidDefinitionError(
                prelude + "Expected str or SolidInvocation key in the top level dict. "
                "Received value {val} of type {type}".format(val=key, type=type(key))
            )
        if not isinstance(dep_dict, dict):
            # Special-case the common mistake of omitting the inner input dict.
            if isinstance(dep_dict, IDependencyDefinition):
                raise DagsterInvalidDefinitionError(
                    prelude
                    + "Received a IDependencyDefinition one layer too high under key {key}. "
                    "The DependencyDefinition should be moved in to a dict keyed on "
                    "input name.".format(key=key)
                )
            else:
                raise DagsterInvalidDefinitionError(
                    prelude + "Under key {key} received value {val} of type {type}. "
                    "Expected dict[str, DependencyDefinition]".format(
                        key=key, val=dep_dict, type=type(dep_dict)
                    )
                )
        for input_key, dep in dep_dict.items():
            # Inner keys name the input of the downstream solid.
            if not isinstance(input_key, six.string_types):
                raise DagsterInvalidDefinitionError(
                    prelude
                    + "Received non-string key in the inner dict for key {key}.".format(key=key)
                )
            if not isinstance(dep, IDependencyDefinition):
                raise DagsterInvalidDefinitionError(
                    prelude
                    + 'Expected IDependencyDefinition for solid "{key}" input "{input_key}". '
                    "Received value {val} of type {type}.".format(
                        key=key, input_key=input_key, val=dep, type=type(dep)
                    )
                )
    return dependencies
def create_execution_structure(solid_defs, dependencies_dict, graph_definition):
    """Build the runtime solid dict and dependency structure for a graph.

    Args:
        solid_defs: list of NodeDefinition objects available to the graph.
        dependencies_dict: validated dict keyed by solid name or SolidInvocation.
        graph_definition: the enclosing GraphDefinition.

    Returns:
        (DependencyStructure, dict[str, Solid]) keyed by alias.
    """
    # Local imports, presumably to avoid a circular import at module load — confirm.
    from .solid import NodeDefinition
    from .graph import GraphDefinition
    check.list_param(solid_defs, "solid_defs", of_type=NodeDefinition)
    check.dict_param(
        dependencies_dict,
        "dependencies_dict",
        key_type=six.string_types + (SolidInvocation,),
        value_type=dict,
    )
    check.inst_param(graph_definition, "graph_definition", GraphDefinition)
    # Normalize every top-level key to a SolidInvocation and index everything by alias.
    aliased_dependencies_dict = {}
    name_to_aliases = defaultdict(set)
    alias_to_solid_instance = {}
    alias_to_name = {}
    for solid_key, input_dep_dict in dependencies_dict.items():
        if not isinstance(solid_key, SolidInvocation):
            solid_key = SolidInvocation(solid_key)
        # An invocation without an explicit alias is aliased by its own name.
        alias = solid_key.alias or solid_key.name
        name_to_aliases[solid_key.name].add(alias)
        alias_to_solid_instance[alias] = solid_key
        alias_to_name[alias] = solid_key.name
        aliased_dependencies_dict[alias] = input_dep_dict
    pipeline_solid_dict = _build_pipeline_solid_dict(
        solid_defs, name_to_aliases, alias_to_solid_instance, graph_definition
    )
    # Raises DagsterInvalidDefinitionError on any inconsistent wiring.
    _validate_dependencies(aliased_dependencies_dict, pipeline_solid_dict, alias_to_name)
    dependency_structure = DependencyStructure.from_definitions(
        pipeline_solid_dict, aliased_dependencies_dict
    )
    return dependency_structure, pipeline_solid_dict
def _build_pipeline_solid_dict(
    solid_defs, name_to_aliases, alias_to_solid_instance, graph_definition
):
    """Instantiate one Solid node per (definition, alias) pair, keyed by alias."""
    solids_by_alias = {}
    for definition in solid_defs:
        # A definition with no recorded aliases is used once, under its own name.
        for alias in name_to_aliases.get(definition.name, {definition.name}):
            invocation = alias_to_solid_instance.get(alias)
            tags = invocation.tags if invocation else {}
            hooks = invocation.hook_defs if invocation else frozenset()
            solids_by_alias[alias] = Solid(
                name=alias,
                definition=definition,
                graph_definition=graph_definition,
                tags=tags,
                hook_defs=hooks,
            )
    return solids_by_alias
def _validate_dependencies(dependencies, solid_dict, alias_to_name):
    """Check every declared dependency edge against the available solids.

    Args:
        dependencies: dict alias -> {input name -> dependency definition}.
        solid_dict: dict alias -> Solid produced by _build_pipeline_solid_dict.
        alias_to_name: dict alias -> underlying solid definition name.

    Raises:
        DagsterInvalidDefinitionError: on self-dependency, unknown solid/input/
        output, or unsupported fan-in.
    """
    for from_solid, dep_by_input in dependencies.items():
        for from_input, dep_def in dep_by_input.items():
            for dep in dep_def.get_definitions():
                # A solid cannot depend on its own output.
                if from_solid == dep.solid:
                    raise DagsterInvalidDefinitionError(
                        (
                            "Invalid dependencies: circular reference detected in solid "
                            '"{from_solid}" input "{from_input}"'
                        ).format(from_solid=from_solid, from_input=from_input)
                    )
                if not from_solid in solid_dict:
                    # Distinguish "unknown solid" from "unknown alias of a solid"
                    # to give the more helpful message.
                    aliased_solid = alias_to_name.get(from_solid)
                    if aliased_solid == from_solid:
                        raise DagsterInvalidDefinitionError(
                            'Invalid dependencies: solid "{solid}" in dependency dictionary not '
                            "found in solid list".format(solid=from_solid)
                        )
                    else:
                        raise DagsterInvalidDefinitionError(
                            (
                                'Invalid dependencies: solid "{aliased_solid}" (aliased by '
                                '"{from_solid}" in dependency dictionary) not found in solid list'
                            ).format(aliased_solid=aliased_solid, from_solid=from_solid)
                        )
                # The downstream solid must declare the named input.
                if not solid_dict[from_solid].definition.has_input(from_input):
                    input_list = solid_dict[from_solid].definition.input_dict.keys()
                    raise DagsterInvalidDefinitionError(
                        'Invalid dependencies: solid "{from_solid}" does not have input '
                        '"{from_input}". '.format(from_solid=from_solid, from_input=from_input)
                        + "Available inputs: {input_list}".format(input_list=input_list)
                    )
                # The upstream solid must exist...
                if not dep.solid in solid_dict:
                    raise DagsterInvalidDefinitionError(
                        'Invalid dependencies: solid "{dep.solid}" not found in solid list. '
                        'Listed as dependency for solid "{from_solid}" input "{from_input}" '.format(
                            dep=dep, from_solid=from_solid, from_input=from_input
                        )
                    )
                # ...and must declare the named output.
                if not solid_dict[dep.solid].definition.has_output(dep.output):
                    raise DagsterInvalidDefinitionError(
                        'Invalid dependencies: solid "{dep.solid}" does not have output '
                        '"{dep.output}". Listed as dependency for solid "{from_solid} input '
                        '"{from_input}"'.format(
                            dep=dep, from_solid=from_solid, from_input=from_input
                        )
                    )
                input_def = solid_dict[from_solid].definition.input_def_named(from_input)
                output_def = solid_dict[dep.solid].definition.output_def_named(dep.output)
                # Fan-in (MultiDependencyDefinition) requires a type that supports it.
                if dep_def.is_multi() and not input_def.dagster_type.supports_fan_in:
                    raise DagsterInvalidDefinitionError(
                        f'Invalid dependencies: for solid "{dep.solid}" input "{input_def.name}", the '
                        f'DagsterType "{input_def.dagster_type.display_name}" does not support fanning in '
                        "(MultiDependencyDefinition). Use the List type, since fanning in will result in a list."
                    )
                _validate_input_output_pair(input_def, output_def, from_solid, dep)
def _validate_input_output_pair(input_def, output_def, from_solid, dep):
    """Raise if a Nothing-typed output is wired into a value-expecting input."""
    # Currently, we opt to be overly permissive with input/output type mismatches.
    # Here we check for the case where no value will be provided where one is expected.
    if (
        output_def.dagster_type.kind == DagsterTypeKind.NOTHING
        and not input_def.dagster_type.kind == DagsterTypeKind.NOTHING
    ):
        raise DagsterInvalidDefinitionError(
            (
                'Input "{input_def.name}" to solid "{from_solid}" can not depend on the output '
                '"{output_def.name}" from solid "{dep.solid}". '
                'Input "{input_def.name}" expects a value of type '
                '{input_def.dagster_type.display_name} and output "{output_def.name}" returns '
                "type {output_def.dagster_type.display_name}{extra}."
            ).format(
                from_solid=from_solid,
                dep=dep,
                output_def=output_def,
                input_def=input_def,
                # Always " (which produces no value)" on this path; kept conditional
                # to mirror the guard above.
                extra=" (which produces no value)"
                if output_def.dagster_type.kind == DagsterTypeKind.NOTHING
                else "",
            )
        )
| true | true |
f73e341446ccc6dc420d367e1f920c64f27fe683 | 1,712 | py | Python | data_structure/lista_encadeada_simples.py | uadson/data-structure | e7c62ff732b9b89e57b9b08dfc6f777e57a52397 | [
"MIT"
] | null | null | null | data_structure/lista_encadeada_simples.py | uadson/data-structure | e7c62ff732b9b89e57b9b08dfc6f777e57a52397 | [
"MIT"
] | null | null | null | data_structure/lista_encadeada_simples.py | uadson/data-structure | e7c62ff732b9b89e57b9b08dfc6f777e57a52397 | [
"MIT"
] | null | null | null | class No:
    def __init__(self, valor):
        """Create a singly-linked-list node holding `valor`, with no successor yet."""
        self.valor = valor
        self.proximo = None
    def mostra_no(self):
        """Print this node's stored value."""
        print(self.valor)
class ListaEncadeada:
    """Singly linked list whose nodes expose `valor` and `proximo` attributes."""

    def __init__(self):
        # Empty list: no head node.
        self.primeiro = None

    def insere_inicio(self, valor):
        """Prepend a new node holding `valor`."""
        node = No(valor)
        node.proximo = self.primeiro
        self.primeiro = node

    def mostrar(self):
        """Print every node's value from head to tail."""
        if self.primeiro is None:
            print('A lista está vazia')
            return None
        node = self.primeiro
        while node is not None:
            node.mostra_no()
            node = node.proximo

    def pesquisa(self, valor):
        """Return the first node whose value equals `valor`, or None if absent."""
        if self.primeiro is None:
            print('A lista está vazia')
            return None
        node = self.primeiro
        while node.valor != valor:
            if node.proximo is None:
                return None
            node = node.proximo
        return node

    def excluir_inicio(self):
        """Unlink and return the head node (prints a message if the list is empty)."""
        if self.primeiro is None:
            print('A lista está vazia')
            return None
        removed = self.primeiro
        self.primeiro = removed.proximo
        return removed

    def excluir_posicao(self, valor):
        """Unlink and return the first node with value `valor`, or None if absent."""
        if self.primeiro is None:
            print('A lista está vazia')
            return None
        prev, node = self.primeiro, self.primeiro
        while node.valor != valor:
            if node.proximo is None:
                return None
            prev, node = node, node.proximo
        if node is self.primeiro:
            # Removing the head: just advance the head pointer.
            self.primeiro = node.proximo
        else:
            # Bypass the removed node.
            prev.proximo = node.proximo
        return node
# Demo: build the list 5 -> 4 -> 3 -> 2 -> 1 by repeated prepends,
# then exercise search and the two removal operations.
lista = ListaEncadeada()
lista.insere_inicio(1)
lista.insere_inicio(2)
lista.insere_inicio(3)
lista.insere_inicio(4)
lista.insere_inicio(5)
# Returns the node holding 3 (result is kept but not printed).
pesquisa = lista.pesquisa(3)
# Remove the head (5), then the node with value 4.
lista.excluir_inicio()
lista.excluir_posicao(4)
lista.excluir_posicao(2) | 21.4 | 43 | 0.64778 | class No:
def __init__(self, valor):
self.valor = valor
self.proximo = None
def mostra_no(self):
print(self.valor)
class ListaEncadeada:
def __init__(self):
self.primeiro = None
def insere_inicio(self, valor):
novo = No(valor)
novo.proximo = self.primeiro
self.primeiro = novo
def mostrar(self):
if self.primeiro == None:
print('A lista está vazia')
return None
atual = self.primeiro
while atual != None:
atual.mostra_no()
atual = atual.proximo
def pesquisa(self, valor):
if self.primeiro == None:
print('A lista está vazia')
return None
atual = self.primeiro
while atual.valor != valor:
if atual.proximo == None:
return None
else:
atual = atual.proximo
return atual
def excluir_inicio(self):
if self.primeiro == None:
print('A lista está vazia')
return None
temp = self.primeiro
self.primeiro = self.primeiro.proximo
return temp
def excluir_posicao(self, valor):
if self.primeiro == None:
print('A lista está vazia')
return None
atual = self.primeiro
anterior = self.primeiro
while atual.valor != valor:
if atual.proximo == None:
return None
else:
anterior = atual
atual = atual.proximo
if atual == self.primeiro:
self.primeiro = self.primeiro.proximo
else:
anterior.proximo = atual.proximo
return atual
lista = ListaEncadeada()
lista.insere_inicio(1)
lista.insere_inicio(2)
lista.insere_inicio(3)
lista.insere_inicio(4)
lista.insere_inicio(5)
pesquisa = lista.pesquisa(3)
lista.excluir_inicio()
lista.excluir_posicao(4)
lista.excluir_posicao(2) | true | true |
f73e34be6608bfdd423623a778535b65b4ac14ec | 1,578 | py | Python | ResNet50 V2/resnet50_v2_model.py | Sourodip-ghosh123/Fruits-360 | f15ce919757f0a0ce057f4ba4b49ce3d5aba53e2 | [
"MIT"
] | null | null | null | ResNet50 V2/resnet50_v2_model.py | Sourodip-ghosh123/Fruits-360 | f15ce919757f0a0ce057f4ba4b49ce3d5aba53e2 | [
"MIT"
] | null | null | null | ResNet50 V2/resnet50_v2_model.py | Sourodip-ghosh123/Fruits-360 | f15ce919757f0a0ce057f4ba4b49ce3d5aba53e2 | [
"MIT"
] | null | null | null | from keras.applications.resnet_v2 import ResNet50V2
# Build a ResNet50V2 from scratch (no pretrained weights) for 100x100 RGB
# fruit images across 41 classes.
model=ResNet50V2(include_top=True, weights=None, input_tensor=None, input_shape=(100,100,3),classes=41)
model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
print('Compiled!')
from keras.models import Sequential
from keras.layers import Conv2D,MaxPooling2D
from keras.layers import Activation, Dense, Flatten, Dropout
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
from keras import backend as K
# NOTE(review): batch_size is defined here but the literal 50 is passed to
# model.fit below — keep the two in sync if either changes.
batch_size = 50
# Keep only the best model (by validation loss) on disk.
checkpointer = ModelCheckpoint(filepath = 'cnn_from_scratch_fruits.hdf5', save_best_only = True)
# NOTE(review): x_train/y_train/x_valid/x_test/y_test are presumably defined in
# an earlier notebook cell — not visible here. Also 'y_vaild' looks like a typo
# for 'y_valid'; confirm which name the data-prep cell actually defines.
history = model.fit(x_train,y_train,
        batch_size = 50,
        epochs=15,
        validation_data=(x_valid, y_vaild),
        callbacks = [checkpointer],
        shuffle=True
        )
# Reload the checkpointed best weights before evaluating on the test set.
model.load_weights('cnn_from_scratch_fruits.hdf5')
score = model.evaluate(x_test, y_test, verbose=0)
print('\n', 'Test accuracy:', score[1])
import matplotlib.pyplot as plt
# Plot training & validation accuracy values
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
| 30.346154 | 103 | 0.726869 | from keras.applications.resnet_v2 import ResNet50V2
model=ResNet50V2(include_top=True, weights=None, input_tensor=None, input_shape=(100,100,3),classes=41)
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print('Compiled!')
from keras.models import Sequential
from keras.layers import Conv2D,MaxPooling2D
from keras.layers import Activation, Dense, Flatten, Dropout
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
from keras import backend as K
batch_size = 50
checkpointer = ModelCheckpoint(filepath = 'cnn_from_scratch_fruits.hdf5', save_best_only = True)
history = model.fit(x_train,y_train,
batch_size = 50,
epochs=15,
validation_data=(x_valid, y_vaild),
callbacks = [checkpointer],
shuffle=True
)
model.load_weights('cnn_from_scratch_fruits.hdf5')
score = model.evaluate(x_test, y_test, verbose=0)
print('\n', 'Test accuracy:', score[1])
import matplotlib.pyplot as plt
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
| true | true |
f73e3599db366d927da9e5c08e59a2b79e26d142 | 7,503 | py | Python | ImmoKaa/scraper.py | gandreassi/ImmoKaa | 904115e5a6f91ca78b41aebdaf4ffe3934a4c318 | [
"MIT"
] | 1 | 2021-06-08T09:12:00.000Z | 2021-06-08T09:12:00.000Z | ImmoKaa/scraper.py | gandreassi/ImmoKaa | 904115e5a6f91ca78b41aebdaf4ffe3934a4c318 | [
"MIT"
] | null | null | null | ImmoKaa/scraper.py | gandreassi/ImmoKaa | 904115e5a6f91ca78b41aebdaf4ffe3934a4c318 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import urllib.request as urllib2
import random
from random import choice
import pandas as pd
import copy, time, sys, shutil, os, yaml, json
import datetime as dt
from glob import glob
import regex
class scraper():
criteria = None
df = None
df_pre = None
__verbose = False
__parameter_names = { #this dict translate the parameters into thei corresponding url bit
'min_price' : 'pf',
'max_price' : 'pt',
'min_rooms' : 'nrf',
'max_rooms' : 'nrt',
'radius' : 'r',
'days_old' : 'pa',
}
__instance_name = None
__root_dir = "./ImmoKaa_data/"
__base_dir = None
    def __init__(self, instance_name, criteria_file):
        """Set up the per-instance data directory, load the YAML search
        criteria, and pull in any previously saved search results.

        Args:
            instance_name: subdirectory name under the ImmoKaa data root.
            criteria_file: path to a YAML file with the search criteria.
        """
        self.__instance_name = instance_name
        self.__base_dir = self.__root_dir+instance_name
        os.makedirs(self.__base_dir, exist_ok=True)
        with open(criteria_file) as file:
            self.criteria = yaml.load(file, Loader=yaml.FullLoader)
        self.get_preexisting_data()
    def _urlquery(self, url, verbose=False):
        """Fetch `url` and return the raw response body (bytes), or None on failure."""
        # function cycles randomly through different user agents and time intervals to simulate more natural queries
        try:
            # Random 0.2–1.2 s pause before and after the request.
            sleeptime = float(random.randint(1,6))/5
            time.sleep(sleeptime)
            agents = ['Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1309.0 Safari/537.17',
                    'Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0',
                    'Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02',
                    'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
                    'Mozilla/3.0',
                    'Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420+ (KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3',
                    'Mozilla/5.0 (Linux; U; Android 0.5; en-us) AppleWebKit/522+ (KHTML, like Gecko) Safari/419.3',
                    'Opera/9.00 (Windows NT 5.1; U; en)']
            agent = choice(agents)
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', agent)]
            html = opener.open(url).read()
            time.sleep(sleeptime)
            return html
        except Exception as e:
            # Best-effort fetch: any failure (network, HTTP error) yields None.
            if verbose: print('Something went wrong with Crawling:\n%s' % e)
            return None
    def _immoscout24parser(self, url, verbose=False):
        '''
        Read search results from Immoscout24.ch, given a specific url indicating the search criteria and the page number.

        Returns the parsed JSON search-result payload, or None on any failure.
        '''
        if verbose: print ("Scanning the following url:", url)
        try:
            soup = BeautifulSoup(self._urlquery(url, verbose), 'html.parser')
            # The search result is embedded as JS state inside a <script> tag.
            scripts = soup.findAll('script')
            scripts = filter(None, [script.string for script in scripts])
            sr = next(script for script in scripts if 'searchResult' in script)
            #Come cleaning... with not-so-clean code. Because ImmoScout keeps changing stuff and I can't be bothered to fix this properly every time.
            # NOTE(review): lstrip strips a *character set*, not a prefix; it works
            # here only because the JSON starts with '{', which is not in the set.
            s = sr.replace(":undefined", ':"undefined"').lstrip("__INITIAL_STATE__=")
            # Drop embedded render functions (recursive regex matches balanced braces).
            s = regex.sub('\{"render".*?(?:\{(?:(?R)|[^{}])*})\}', '""', s)
            # Replace each arrow-function body ('e=>{...}') with an empty string
            # so the remainder is valid JSON.
            poss = [m.start() for m in regex.finditer('e=>', s)]
            res = s[:poss[0]]
            for i in range(len(poss)):
                end = len(s)
                if i+1 < len(poss):
                    end = poss[i+1]
                dd = regex.sub('(?:\{(?:(?R)|[^{}])*})', '""', s[poss[i]+3:end], 1)
                res += dd
            js = json.loads(res)
            return js
        except Exception as e:
            # Any scraping/parsing hiccup degrades to None; caller retries.
            if verbose: print("Error in immoscout24 parser: %s" % e)
            return None
def _make_url(self, criteria, page):
url = 'https://www.immoscout24.ch/en/real-estate/{mode}/city-{city}?'.format(**criteria)
for key in [x for x in criteria.keys() if x not in ['city', 'mode']]:
try:
url+=self.__parameter_names[key]+'='+str(criteria[key])+"&"
except KeyError:
raise Exception("Error in make_url", "Unsupported search parameter!")
url = url[:-1]+"&pn="+str(page) #add page number
return url
    def _get_listings(self, criteria, verbose):
        """
        Pull a list of listings for given criteria and cities, and put them in a dataframe.

        Pages through the search results; each page is retried up to 5 times.
        NOTE(review): if every page fails, data_pages is empty and pd.concat
        raises ValueError — confirm whether that is the intended failure mode.
        """
        print ("city:",criteria['city'])
        page = 0
        data_pages = []
        # Real page count is discovered from the first successful response.
        numberOfPages = 1
        while page<numberOfPages:
            page+=1
            url = self._make_url(criteria, page)
            resultlist_json = None
            N_attempts = 0
            while resultlist_json is None and N_attempts<5:
                try:
                    N_attempts+=1
                    resultlist_json = self._immoscout24parser(url, verbose)
                    numberOfPages = int(resultlist_json["pages"]["searchResult"]["resultData"]["pagingData"]["totalPages"])
                    print("\tpage: {0}/{1}".format(page,numberOfPages), end=" ")
                    data = resultlist_json["pages"]["searchResult"]["resultData"]["listData"]
                    data = pd.DataFrame.from_dict(data)
                    data["searched-city"]=criteria['city'] #store which city we searched, for reference
                    data["fetch-date"]=dt.datetime.now().date()
                    print("({0} results)".format(data.shape[0]))
                    data_pages.append(copy.copy(data))
                except Exception as e:
                    # Swallow the error and retry (parser returned None or the
                    # payload was malformed).
                    print (e)
                    pass
        data_all = pd.concat(data_pages)
        return data_all
    def scrape(self):
        """Run one search per city in the criteria and store the combined
        results in self.df."""
        dfs = []
        for city in self.criteria['cities']:
            # Per-city criteria: replace the 'cities' list with a single 'city'.
            criteria_city = copy.copy(self.criteria)
            criteria_city['city'] = city
            del criteria_city['cities']
            dfs.append(self._get_listings(criteria_city, verbose=self.__verbose))
        self.df = pd.concat(dfs)
def set_verbose(self, flag):
if not isinstance(flag, bool):
raise Exception("ImmoKaa - set_verbose", "Argument must be bool.")
self.__verbose=flag
    def save_scraped_dataframe(self):
        """Write the current scrape results to a date-stamped CSV history file."""
        if self.df is None:
            raise Exception("There is no scraped dataset to save.")
        today = dt.datetime.now().date().strftime("%Y-%m-%d")
        # NOTE(review): "serach" is misspelled, but get_preexisting_data globs the
        # same misspelling — renaming would orphan previously saved files.
        self.df.to_csv(self.__base_dir+"/serach_results_"+today+".csv", mode="w")
        print ("History file created/overwritten.")
    def get_preexisting_data(self):
        """Load all previously saved search-result CSVs into self.df_pre.

        Leaves self.df_pre untouched when no history directory exists.
        NOTE(review): with zero matching files, pd.concat([]) raises ValueError,
        which is not caught here — confirm whether glob can ever return empty
        while the directory exists.
        """
        pres = []
        try:
            for f in glob(self.__base_dir+"/serach_results_*.csv"):
                pres.append(pd.read_csv(f))
                # CSV round-trip turns dates into strings; restore date objects.
                pres[-1]["fetch-date"] = pd.to_datetime(pres[-1]['fetch-date'],\
                                                        format="%Y-%m-%d").dt.date
            self.df_pre = pd.concat(pres)
            print ("Found {0} pre-existing data file(s). You can access the full dataset using get_full_dataset().". format(len(pres)))
        except FileNotFoundError:
            pass
def get_full_dataset(self):
return pd.concat([self.df, self.df_pre]) | 39.078125 | 185 | 0.553645 | from bs4 import BeautifulSoup
import urllib.request as urllib2
import random
from random import choice
import pandas as pd
import copy, time, sys, shutil, os, yaml, json
import datetime as dt
from glob import glob
import regex
class scraper():
criteria = None
df = None
df_pre = None
__verbose = False
__parameter_names = {
'min_price' : 'pf',
'max_price' : 'pt',
'min_rooms' : 'nrf',
'max_rooms' : 'nrt',
'radius' : 'r',
'days_old' : 'pa',
}
__instance_name = None
__root_dir = "./ImmoKaa_data/"
__base_dir = None
def __init__(self, instance_name, criteria_file):
self.__instance_name = instance_name
self.__base_dir = self.__root_dir+instance_name
os.makedirs(self.__base_dir, exist_ok=True)
with open(criteria_file) as file:
self.criteria = yaml.load(file, Loader=yaml.FullLoader)
self.get_preexisting_data()
def _urlquery(self, url, verbose=False):
try:
sleeptime = float(random.randint(1,6))/5
time.sleep(sleeptime)
agents = ['Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1309.0 Safari/537.17',
'Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0',
'Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
'Mozilla/3.0',
'Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420+ (KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3',
'Mozilla/5.0 (Linux; U; Android 0.5; en-us) AppleWebKit/522+ (KHTML, like Gecko) Safari/419.3',
'Opera/9.00 (Windows NT 5.1; U; en)']
agent = choice(agents)
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', agent)]
html = opener.open(url).read()
time.sleep(sleeptime)
return html
except Exception as e:
if verbose: print('Something went wrong with Crawling:\n%s' % e)
return None
def _immoscout24parser(self, url, verbose=False):
if verbose: print ("Scanning the following url:", url)
try:
soup = BeautifulSoup(self._urlquery(url, verbose), 'html.parser')
scripts = soup.findAll('script')
scripts = filter(None, [script.string for script in scripts])
sr = next(script for script in scripts if 'searchResult' in script)
s = sr.replace(":undefined", ':"undefined"').lstrip("__INITIAL_STATE__=")
s = regex.sub('\{"render".*?(?:\{(?:(?R)|[^{}])*})\}', '""', s)
poss = [m.start() for m in regex.finditer('e=>', s)]
res = s[:poss[0]]
for i in range(len(poss)):
end = len(s)
if i+1 < len(poss):
end = poss[i+1]
dd = regex.sub('(?:\{(?:(?R)|[^{}])*})', '""', s[poss[i]+3:end], 1)
res += dd
js = json.loads(res)
return js
except Exception as e:
if verbose: print("Error in immoscout24 parser: %s" % e)
return None
def _make_url(self, criteria, page):
url = 'https://www.immoscout24.ch/en/real-estate/{mode}/city-{city}?'.format(**criteria)
for key in [x for x in criteria.keys() if x not in ['city', 'mode']]:
try:
url+=self.__parameter_names[key]+'='+str(criteria[key])+"&"
except KeyError:
raise Exception("Error in make_url", "Unsupported search parameter!")
url = url[:-1]+"&pn="+str(page) #add page number
return url
def _get_listings(self, criteria, verbose):
print ("city:",criteria['city'])
page = 0
data_pages = []
numberOfPages = 1
while page<numberOfPages:
page+=1
url = self._make_url(criteria, page)
resultlist_json = None
N_attempts = 0
while resultlist_json is None and N_attempts<5:
try:
N_attempts+=1
resultlist_json = self._immoscout24parser(url, verbose)
numberOfPages = int(resultlist_json["pages"]["searchResult"]["resultData"]["pagingData"]["totalPages"])
print("\tpage: {0}/{1}".format(page,numberOfPages), end=" ")
data = resultlist_json["pages"]["searchResult"]["resultData"]["listData"]
data = pd.DataFrame.from_dict(data)
data["searched-city"]=criteria['city'] #store which city we searched, for reference
data["fetch-date"]=dt.datetime.now().date()
print("({0} results)".format(data.shape[0]))
data_pages.append(copy.copy(data))
except Exception as e:
print (e)
pass
data_all = pd.concat(data_pages)
return data_all
def scrape(self):
dfs = []
for city in self.criteria['cities']:
criteria_city = copy.copy(self.criteria)
criteria_city['city'] = city
del criteria_city['cities']
dfs.append(self._get_listings(criteria_city, verbose=self.__verbose))
self.df = pd.concat(dfs)
def set_verbose(self, flag):
if not isinstance(flag, bool):
raise Exception("ImmoKaa - set_verbose", "Argument must be bool.")
self.__verbose=flag
def save_scraped_dataframe(self):
if self.df is None:
raise Exception("There is no scraped dataset to save.")
today = dt.datetime.now().date().strftime("%Y-%m-%d")
self.df.to_csv(self.__base_dir+"/serach_results_"+today+".csv", mode="w")
print ("History file created/overwritten.")
def get_preexisting_data(self):
pres = []
try:
for f in glob(self.__base_dir+"/serach_results_*.csv"):
pres.append(pd.read_csv(f))
pres[-1]["fetch-date"] = pd.to_datetime(pres[-1]['fetch-date'],\
format="%Y-%m-%d").dt.date
self.df_pre = pd.concat(pres)
print ("Found {0} pre-existing data file(s). You can access the full dataset using get_full_dataset().". format(len(pres)))
except FileNotFoundError:
pass
def get_full_dataset(self):
return pd.concat([self.df, self.df_pre]) | true | true |
f73e35ad2b6643c1b675aee576ecf540ae03462d | 18,565 | py | Python | shaDow/utils.py | yxia-fb/shaDow-GNN | 2b867011c7084d4ed1b407e29f3ee09632fcc3dc | [
"MIT"
] | null | null | null | shaDow/utils.py | yxia-fb/shaDow-GNN | 2b867011c7084d4ed1b407e29f3ee09632fcc3dc | [
"MIT"
] | 1 | 2022-01-22T11:20:00.000Z | 2022-01-22T11:20:00.000Z | shaDow/utils.py | yxia-fb/shaDow-GNN | 2b867011c7084d4ed1b407e29f3ee09632fcc3dc | [
"MIT"
] | null | null | null | import os
import torch
import glob
import numpy as np
import scipy.sparse as sp
import yaml
from sklearn.preprocessing import StandardScaler
from shaDow.globals import git_rev, timestamp, Logger
from torch_scatter import scatter
from copy import deepcopy
from typing import List, Union
from shaDow import TRAIN, VALID, TEST
from shaDow.data_converter import convert2shaDow, to_undirected
def load_data(prefix, dataset, config_data, os_='linux'):
    """Load the graph, features, labels and node split for `dataset`.

    Args:
        prefix: dict with key 'local' giving the root data directory.
        dataset: name of the dataset subdirectory under the root.
        config_data: 'data' section of the training config; uses the
            'to_undirected', 'transductive' and 'norm_feat' flags.
        os_: unused; kept for interface compatibility.

    Returns:
        dict with keys adj_full / adj_train (scipy sparse adjacency),
        feat_full / label_full (torch tensors), node_set (per-split int64
        index arrays) and bin_adj_files (paths to binary CSR files for the
        C++ sampler, or None entries when unavailable).

    Fix applied: the inductive raw-adjacency branch called
    load_adj(prefix, ...) with the prefix *dict* instead of the resolved
    local path prefix_l, unlike every other load_adj call here.
    """
    Logger.printf("Loading training data..")
    prefix_l = prefix['local']
    fs_shadow = ['adj_full_raw.np[yz]', 'adj_train_raw.np[yz]', 'label_full.npy', 'feat_full.npy', 'split.npy']
    # Convert the raw dataset into shaDow format on first use.
    if not all(glob.glob(f"{prefix_l}/{dataset}/{f}") for f in fs_shadow):
        convert2shaDow(dataset, prefix_l)
    role = np.load(f"./{prefix_l}/{dataset}/split.npy", allow_pickle=True)
    if type(role) == np.ndarray:
        role = role[()]
    else:
        assert type(role) == dict
    # role is used as index, which is required to be int64 (node_set won't take much mem anyways)
    node_set = {TRAIN: np.asarray(role[TRAIN], dtype=np.int64),
                VALID: np.asarray(role[VALID], dtype=np.int64),
                TEST : np.asarray(role[TEST], dtype=np.int64)}
    # load adj. If we want to convert to_undirected, and the undirected adj has been stored as external file,
    # then we skip the conversion in the program and directly load the undirected adj.
    bin_adj_files = {TRAIN: {'indptr': None, 'indices': None, 'data': None},
                     VALID: {'indptr': None, 'indices': None, 'data': None},
                     TEST: {'indptr': None, 'indices': None, 'data': None}}
    def fill_bin_adj_dict(mode_, split_, type_):
        # Record the on-disk binary CSR file names for the given mode/split.
        for d in ['indptr', 'indices', 'data']:
            bin_adj_files[mode_][d] = f"{prefix_l}/{dataset}/cpp/adj_{split_}_{type_}_{d}.bin"
    if config_data['to_undirected']:
        # Prefer a precomputed undirected adj on disk; otherwise symmetrize raw.
        if (adj_full := load_adj(prefix_l, dataset, 'undirected', 'full')) is None:
            adj_full = load_adj(prefix_l, dataset, 'raw', 'full')
            adj_full = to_undirected(adj_full)
        fill_bin_adj_dict(VALID, 'full', 'undirected')
        fill_bin_adj_dict(TEST, 'full', 'undirected')
        if config_data['transductive']:
            adj_train = adj_full
            fill_bin_adj_dict(TRAIN, 'full', 'undirected')
        elif (adj_train := load_adj(prefix_l, dataset, 'undirected', 'train')) is None:
            adj_train = load_adj(prefix_l, dataset, 'raw', 'train')
            adj_train = to_undirected(adj_train)
            fill_bin_adj_dict(TRAIN, 'train', 'undirected')
            # NOTE(review): when the undirected train adj *is* loaded from disk,
            # neither fill_bin_adj_dict(TRAIN, ...) nor this assert runs — confirm
            # that is intended.
            assert set(adj_train.nonzero()[0]).issubset(set(node_set[TRAIN]))
    else:
        adj_full = load_adj(prefix_l, dataset, 'raw', 'full')
        fill_bin_adj_dict(VALID, 'full', 'raw')
        fill_bin_adj_dict(TEST, 'full', 'raw')
        if config_data['transductive']:
            adj_train = adj_full
            fill_bin_adj_dict(TRAIN, 'full', 'raw')
        else:
            # FIX: was load_adj(prefix, ...) — passed the prefix dict, not the path.
            adj_train = load_adj(prefix_l, dataset, 'raw', 'train')
            assert set(adj_train.nonzero()[0]).issubset(set(node_set[TRAIN]))
            fill_bin_adj_dict(TRAIN, 'train', 'raw')
    bin_adj_files = validate_bin_file(bin_adj_files)
    Logger.printf(f"SETTING TO {'TRANS' if config_data['transductive'] else 'IN'}DUCTIVE LEARNING", style="red")
    label_full = np.load(f"./{prefix_l}/{dataset}/label_full.npy")
    label_full = torch.from_numpy(label_full)
    # ======= deal with feats =======
    mode_norm = 'all' if config_data['transductive'] else 'train'
    if config_data['norm_feat'] and os.path.isfile(f"./{prefix_l}/{dataset}/feat_full_norm_{mode_norm}.npy"):
        # Precomputed normalized features exist: load them directly.
        feats = np.load(f"./{prefix_l}/{dataset}/feat_full_norm_{mode_norm}.npy")
        Logger.printf(f"Loading '{mode_norm}'-normalized features", style='yellow')
    else:
        feats = np.load(f"./{prefix_l}/{dataset}/feat_full.npy")
        if config_data['norm_feat']:
            # Inductive setting: fit the scaler on training nodes only to avoid
            # leaking statistics from valid/test nodes.
            feats_fit = feats if config_data['transductive'] else feats[node_set[TRAIN]]
            scaler = StandardScaler()
            scaler.fit(feats_fit)
            feats = scaler.transform(feats)
            Logger.printf(f"Normalizing node features (mode = {mode_norm})", style="yellow")
        else:
            Logger.printf("Not normalizing node features", style="yellow")
    feats = torch.from_numpy(feats.astype(np.float32, copy=False))
    Logger.printf("Done loading training data..")
    return {'adj_full' : adj_full,
            'adj_train' : adj_train,
            'feat_full' : feats,
            'label_full': label_full,
            'node_set' : node_set,
            'bin_adj_files': bin_adj_files}
def parse_n_prepare(task, args, name_graph, dir_log, os_='linux'):
# [config]
if args.configs is not None:
config_train = args.configs
else:
assert task in ['inference', 'postproc']
if task == 'inference':
if args.inference_configs is None:
assert not args.compute_complexity_only
dir_candy = args.inference_dir
else:
assert args.inference_dir is None and args.compute_complexity_only
dir_candy = None
config_train = args.inference_configs
else:
if args.postproc_dir is not None:
dir_candy = args.postproc_dir
else:
with open(args.postproc_configs) as f:
config_temp = yaml.load(f, Loader=yaml.FullLoader)
if 'dir_pred_mat' in config_temp: # all such dirs MUST contain the same yaml
dir_candy = config_temp['dir_pred_mat'][0]
elif 'dir_emb_mat' in config_temp: # all ens models should have the same arch (only differs in sampler)
dir_candy = next(iter(config_temp['dir_emb_mat'].values()))[0]
else:
raise NotImplementedError
if dir_candy is not None:
assert os.path.isdir(dir_candy)
f_yml = [f for f in os.listdir(dir_candy) if f.split('.')[-1] in ['yml', 'yaml']]
assert len(f_yml) == 1
config_train = f"{dir_candy}/{f_yml[0]}"
with open(config_train) as f_config_train:
config_train = yaml.load(f_config_train, Loader=yaml.FullLoader)
config_train_copy = deepcopy(config_train)
# [data]
config_data = {"to_undirected" : False,
"transductive" : False,
"norm_feat" : True}
config_data.update(config_train['data'])
# [arch]
arch_gnn = { # default values
"dim" : -1,
"aggr" : "sage",
"residue" : "none",
"pooling" : "center",
"loss" : "softmax",
"num_layers" : -1,
"act" : "I",
"heads" : -1,
"feature_augment" : "hops",
"feature_smoothen" : "none",
"label_smoothen" : "none", # label_smoothen is only considered if use_label != none
"ensemble_act" : "leakyrelu",
"branch_sharing" : False,
"use_label" : "none"
}
arch_gnn.update(config_train["architecture"])
assert arch_gnn['aggr'] in ['sage', 'gat', 'gatscat', 'gcn', 'mlp', 'gin', 'sgc', 'sign']
assert arch_gnn['use_label'].lower() in ['all', 'none', 'no_valid']
assert arch_gnn['pooling'].lower().split('-')[0] in ['mean', 'max', 'sum', 'center', 'sort']
assert arch_gnn['residue'].lower() in ['sum', 'concat', 'max', 'none']
assert arch_gnn['feature_augment'].lower() in ['hops', 'ppr', 'none']
if arch_gnn["feature_augment"] and arch_gnn["feature_augment"].lower() != "none":
arch_gnn["feature_augment"] = set(k for k in arch_gnn["feature_augment"].split("-"))
else:
arch_gnn['feature_augment'] = set()
# [params]
params_train = {
"lr" : 0.01,
"dropedge" : 0.0,
"ensemble_dropout" : "none"
}
params_train.update(config_train["hyperparameter"])
params_train["lr"] = float(params_train["lr"])
# [sampler]
sampler_preproc, sampler_train = [], []
for s in config_train['sampler']:
phase = s.pop('phase')
if phase == 'preprocess':
sampler_preproc.append(s)
elif phase == 'train':
sampler_train.append(s)
else:
raise NotImplementedError
batch_size = config_train["hyperparameter"]["batch_size"]
config_sampler_preproc = {"batch_size": batch_size, "configs": sampler_preproc}
config_sampler_train = {"batch_size": batch_size, "configs": sampler_train}
# add self-edges for certain arch. e.g., for GAT, will be divide-by-0 error in grad without self-edges
if arch_gnn["aggr"] in ["gcn", "gat", "gatscat"]:
for sc in config_sampler_train["configs"]:
num_ens = [len(v) for k, v in sc.items() if k != 'method']
assert max(num_ens) == min(num_ens)
sc["add_self_edge"] = [True] * num_ens[0]
# [copy yml]
name_key = f"{arch_gnn['aggr']}_{arch_gnn['num_layers']}"
dir_log_full = log_dir(task, config_train_copy, name_key, dir_log, name_graph, git_rev, timestamp)
return params_train, config_sampler_preproc, config_sampler_train, config_data, arch_gnn, dir_log_full
def parse_n_prepare_postproc(dir_load, f_config, name_graph, dir_log, arch_gnn, logger):
if f_config is not None:
with open(f_config) as f:
config_postproc = yaml.load(f, Loader=yaml.FullLoader)
name_key = f"postproc-{arch_gnn['aggr']}_{arch_gnn['num_layers']}"
log_dir('postproc', config_postproc, name_key, dir_log, name_graph, git_rev, timestamp)
skip_instantiate = []
if 'check_record' in config_postproc:
load_acc_record = config_postproc['check_record']
else:
load_acc_record = True
if config_postproc['method'] == 'cs': # C&S
acc_record = [] if load_acc_record else None
if dir_load is not None:
if 'dir_pred_mat' not in config_postproc:
config_postproc['dir_pred_mat'] = [dir_load]
elif os.path.realpath(dir_load) not in [os.path.realpath(pc) for pc in config_postproc['dir_pred_mat']]:
config_postproc['dir_pred_mat'].append(dir_load)
config_postproc['pred_mat'] = [None] * len(config_postproc['dir_pred_mat'])
for i, di in enumerate(config_postproc['dir_pred_mat']):
if load_acc_record:
acc_record.append(logger.decode_csv('final', di))
for f in os.listdir(di):
if 'cs' == f.split('.')[-1] and f.startswith('pred_mat'):
config_postproc['pred_mat'][i] = torch.load(f"{di}/{f}")
break
if all(m is not None for m in config_postproc['pred_mat']):
skip_instantiate = ['data', 'model']
elif config_postproc['method'] == 'ensemble': # Variant of subgraph ensemble as postproc
acc_record = {s: [] for s in config_postproc['dir_emb_mat']} if load_acc_record else None
assert dir_load is None
config_postproc['emb_mat'] = {k: [None] * len(v) for k, v in config_postproc['dir_emb_mat'].items()}
for sname, dirs_l in config_postproc['dir_emb_mat'].items():
for i, di in enumerate(dirs_l):
if load_acc_record:
acc_record[sname].append(logger.decode_csv('final', di))
for f in os.listdir(di):
if 'ens' == f.split('.')[-1] and f.startswith('emb_mat'):
config_postproc['emb_mat'][sname][i] = torch.load(f"{di}/{f}")
break
if all(m is not None for s, mat_l in config_postproc['emb_mat'].items() for m in mat_l):
skip_instantiate = ['model'] # you have to load data (role, labels) anyways
return config_postproc, acc_record, skip_instantiate
def log_dir(task, config_new, yml_name_key, dir_log, name_graph, git_rev, timestamp):
if task == 'train':
prefix = 'running'
elif task == 'inference':
prefix = 'INF'
elif task == 'postproc':
prefix = 'POST'
else:
raise NotImplementedError
log_dir = f"{dir_log}/{name_graph}/{prefix}/{timestamp}-{git_rev.strip():s}/"
if not os.path.exists(log_dir):
os.makedirs(log_dir)
yml_file = f"{log_dir}/{yml_name_key}.yml"
with open(yml_file, 'w') as f:
yaml.dump(config_new, f, default_flow_style=False, sort_keys=False)
return log_dir
# =============== #
# ADJ UTILS #
# =============== #
def get_deg_torch_sparse(adj):
return scatter(adj._values(), adj._indices()[0], reduce="sum")
def adj_norm_rw(adj, deg=None, dropedge=0., sort_indices=True):
"""
Normalize adj according to the method of rw normalization.
Note that sym norm is used in the original GCN paper (kipf),
while rw norm is used in GraphSAGE and some other variants.
# Procedure:
# 1. adj add self-connection --> adj'
# 2. D' deg matrix from adj'
# 3. norm by D^{-1} x adj'
if sort_indices is True, we re-sort the indices of the returned adj
Note that after 'dot' the indices of a node would be in descending order
rather than ascending order
"""
if type(adj) == torch.Tensor:
assert deg is None
assert torch.sum(adj._values()).cpu().long().item() == adj._values().size()[0]
_deg_orig = get_deg_torch_sparse(adj)
if dropedge > 0:
masked_indices = torch.floor(torch.rand(int(adj._values().size()[0] * dropedge)) * adj._values().size()[0]).long()
adj._values()[masked_indices] = 0
_deg_dropped = get_deg_torch_sparse(adj)
else:
_deg_dropped = _deg_orig
_deg = torch.repeat_interleave(_deg_dropped, _deg_orig.long())
_deg = torch.clamp(_deg, min=1)
_val = adj._values()
_val /= _deg
adj_norm = adj
else:
assert dropedge == 0., "not supporting dropedge for scipy csr matrices"
assert adj.shape[0] == adj.shape[1]
diag_shape = (adj.shape[0], adj.shape[1])
D = adj.sum(1).flatten() if deg is None else deg
D = np.clip(D, 1, None) # if deg_v == 0, it doesn't matter what value we clip it to.
norm_diag = sp.dia_matrix((1 / D, 0), shape=diag_shape)
adj_norm = norm_diag.dot(adj)
if sort_indices:
adj_norm.sort_indices()
return adj_norm
def adj_norm_sym(adj, sort_indices=True, add_self_edge=False, dropedge=0.):
assert adj.shape[0] == adj.shape[1]
assert adj.data.sum() == adj.size, "symmetric normalization only supports binary input adj"
N = adj.shape[0]
# drop edges symmetrically
if dropedge > 0:
masked_indices = np.random.choice(adj.size, int(adj.size * dropedge))
adj.data[masked_indices] = 0
adjT = adj.tocsc()
data_add = adj.data + adjT.data
survived_indices = np.where(data_add == 2)[0]
adj.data *= 0
adj.data[survived_indices] = 1
# augment adj with self-connection
if add_self_edge:
indptr_new = np.zeros(N + 1)
neigh_list = [set(adj.indices[adj.indptr[v] : adj.indptr[v+1]]) for v in range(N)]
for i in range(len(neigh_list)):
neigh_list[i].add(i)
neigh_list[i] = np.sort(np.fromiter(neigh_list[i], int, len(neigh_list[i])))
indptr_new[i + 1] = neigh_list[i].size
indptr_new = indptr_new.cumsum()
indices_new = np.concatenate(neigh_list)
data_new = np.broadcast_to(np.ones(1), indices_new.size)
adj_aug = sp.csr_matrix((data_new, indices_new, indptr_new), shape=adj.shape)
# NOTE: no need to explicitly convert dtype, since adj_norm_sym is used for subg only
else:
adj_aug = adj
# normalize
D = np.clip(adj_aug.sum(1).flatten(), 1, None)
norm_diag = sp.dia_matrix((np.power(D, -0.5), 0), shape=adj_aug.shape)
adj_norm = norm_diag.dot(adj_aug).dot(norm_diag)
if sort_indices:
adj_norm.sort_indices()
return adj_norm
def coo_scipy2torch(adj):
"""
convert a scipy sparse COO matrix to torch
"""
values = adj.data
indices = np.vstack((adj.row, adj.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
return torch.sparse.FloatTensor(i, v, torch.Size(adj.shape))
# ================= #
# ADJ FILE IO UTILS #
# ================= #
def load_adj(prefix, dataset, type_, split_):
"""
Try to load the prestored undirected adj. If the file does not exist, then you MUST return a None
"""
assert split_ in ['full', 'train'], "UNKNOWN ADJ SPLIT. ONLY ACCEPT [full] or [train]"
assert type_ in ['raw', 'undirected'], "UNKNOWN ADJ TYPE. ONLY ACCEPT [raw] or [undirected]"
file_adj = f"{prefix}/{dataset}/adj_{split_}_{type_}." + "{}"
if os.path.isfile(file_adj.format('npz')):
adj = sp.load_npz(file_adj.format('npz'))
elif os.path.isfile(file_adj.format('npy')):
adj_d = np.load(file_adj.format('npy'), allow_pickle=True)
if type(adj_d) == np.ndarray:
adj_d = adj_d[()]
else:
assert type(adj_d) == dict
indptr = adj_d['indptr']
indices = adj_d['indices']
if 'data' in adj_d:
data = adj_d['data']
else:
data = np.broadcast_to(np.ones(1, dtype=np.bool), indices.size)
num_nodes = indptr.size - 1
adj = sp.csr_matrix((data, indices, indptr), shape=(num_nodes, num_nodes))
else:
adj = None
return adj
def validate_bin_file(bin_adj_files):
for md, df in bin_adj_files.items():
assert set(df.keys()) == set(['indptr', 'indices', 'data'])
if not os.path.isfile(df['indptr']) or not os.path.isfile(df['indices']):
return {mmd: None for mmd in bin_adj_files}
if not os.path.isfile(df['data']):
df['data'] = ''
return bin_adj_files
def merge_stat_record(dict_l : List[dict]):
key_l = [set(d.keys()) for d in dict_l]
assert all(k == key_l[0] == set([TRAIN, VALID, TEST]) for k in key_l)
names_stat = set(dict_l[0][TRAIN].keys())
ret = {n: {TRAIN: [], VALID: [], TEST: []} for n in names_stat}
for d in dict_l:
for m in [TRAIN, VALID, TEST]:
assert set(d[m].keys()) == names_stat
for k, v in d[m].items():
ret[k][m].append(v)
return ret | 44.842995 | 126 | 0.605117 | import os
import torch
import glob
import numpy as np
import scipy.sparse as sp
import yaml
from sklearn.preprocessing import StandardScaler
from shaDow.globals import git_rev, timestamp, Logger
from torch_scatter import scatter
from copy import deepcopy
from typing import List, Union
from shaDow import TRAIN, VALID, TEST
from shaDow.data_converter import convert2shaDow, to_undirected
def load_data(prefix, dataset, config_data, os_='linux'):
Logger.printf("Loading training data..")
prefix_l = prefix['local']
fs_shadow = ['adj_full_raw.np[yz]', 'adj_train_raw.np[yz]', 'label_full.npy', 'feat_full.npy', 'split.npy']
if not all(glob.glob(f"{prefix_l}/{dataset}/{f}") for f in fs_shadow):
convert2shaDow(dataset, prefix_l)
role = np.load(f"./{prefix_l}/{dataset}/split.npy", allow_pickle=True)
if type(role) == np.ndarray:
role = role[()]
else:
assert type(role) == dict
node_set = {TRAIN: np.asarray(role[TRAIN], dtype=np.int64),
VALID: np.asarray(role[VALID], dtype=np.int64),
TEST : np.asarray(role[TEST], dtype=np.int64)}
# load adj. If we want to convert to_undirected, and the undirected adj has been stored as external file,
# then we skip the conversion in the program and directly load the undirected adj.
bin_adj_files = {TRAIN: {'indptr': None, 'indices': None, 'data': None},
VALID: {'indptr': None, 'indices': None, 'data': None},
TEST: {'indptr': None, 'indices': None, 'data': None}}
def fill_bin_adj_dict(mode_, split_, type_):
for d in ['indptr', 'indices', 'data']:
bin_adj_files[mode_][d] = f"{prefix_l}/{dataset}/cpp/adj_{split_}_{type_}_{d}.bin"
if config_data['to_undirected']:
if (adj_full := load_adj(prefix_l, dataset, 'undirected', 'full')) is None:
adj_full = load_adj(prefix_l, dataset, 'raw', 'full')
adj_full = to_undirected(adj_full)
fill_bin_adj_dict(VALID, 'full', 'undirected')
fill_bin_adj_dict(TEST, 'full', 'undirected')
if config_data['transductive']:
adj_train = adj_full
fill_bin_adj_dict(TRAIN, 'full', 'undirected')
elif (adj_train := load_adj(prefix_l, dataset, 'undirected', 'train')) is None:
adj_train = load_adj(prefix_l, dataset, 'raw', 'train')
adj_train = to_undirected(adj_train)
fill_bin_adj_dict(TRAIN, 'train', 'undirected')
assert set(adj_train.nonzero()[0]).issubset(set(node_set[TRAIN]))
else:
adj_full = load_adj(prefix_l, dataset, 'raw', 'full')
fill_bin_adj_dict(VALID, 'full', 'raw')
fill_bin_adj_dict(TEST, 'full', 'raw')
if config_data['transductive']:
adj_train = adj_full
fill_bin_adj_dict(TRAIN, 'full', 'raw')
else:
adj_train = load_adj(prefix, dataset, 'raw', 'train')
assert set(adj_train.nonzero()[0]).issubset(set(node_set[TRAIN]))
fill_bin_adj_dict(TRAIN, 'train', 'raw')
bin_adj_files = validate_bin_file(bin_adj_files)
Logger.printf(f"SETTING TO {'TRANS' if config_data['transductive'] else 'IN'}DUCTIVE LEARNING", style="red")
label_full = np.load(f"./{prefix_l}/{dataset}/label_full.npy")
label_full = torch.from_numpy(label_full)
# ======= deal with feats =======
mode_norm = 'all' if config_data['transductive'] else 'train'
if config_data['norm_feat'] and os.path.isfile(f"./{prefix_l}/{dataset}/feat_full_norm_{mode_norm}.npy"):
feats = np.load(f"./{prefix_l}/{dataset}/feat_full_norm_{mode_norm}.npy")
Logger.printf(f"Loading '{mode_norm}'-normalized features", style='yellow')
else:
feats = np.load(f"./{prefix_l}/{dataset}/feat_full.npy")
if config_data['norm_feat']:
feats_fit = feats if config_data['transductive'] else feats[node_set[TRAIN]]
scaler = StandardScaler()
scaler.fit(feats_fit)
feats = scaler.transform(feats)
Logger.printf(f"Normalizing node features (mode = {mode_norm})", style="yellow")
else:
Logger.printf("Not normalizing node features", style="yellow")
feats = torch.from_numpy(feats.astype(np.float32, copy=False))
Logger.printf("Done loading training data..")
return {'adj_full' : adj_full,
'adj_train' : adj_train,
'feat_full' : feats,
'label_full': label_full,
'node_set' : node_set,
'bin_adj_files': bin_adj_files}
def parse_n_prepare(task, args, name_graph, dir_log, os_='linux'):
# [config]
if args.configs is not None:
config_train = args.configs
else:
assert task in ['inference', 'postproc']
if task == 'inference':
if args.inference_configs is None:
assert not args.compute_complexity_only
dir_candy = args.inference_dir
else:
assert args.inference_dir is None and args.compute_complexity_only
dir_candy = None
config_train = args.inference_configs
else:
if args.postproc_dir is not None:
dir_candy = args.postproc_dir
else:
with open(args.postproc_configs) as f:
config_temp = yaml.load(f, Loader=yaml.FullLoader)
if 'dir_pred_mat' in config_temp: # all such dirs MUST contain the same yaml
dir_candy = config_temp['dir_pred_mat'][0]
elif 'dir_emb_mat' in config_temp: # all ens models should have the same arch (only differs in sampler)
dir_candy = next(iter(config_temp['dir_emb_mat'].values()))[0]
else:
raise NotImplementedError
if dir_candy is not None:
assert os.path.isdir(dir_candy)
f_yml = [f for f in os.listdir(dir_candy) if f.split('.')[-1] in ['yml', 'yaml']]
assert len(f_yml) == 1
config_train = f"{dir_candy}/{f_yml[0]}"
with open(config_train) as f_config_train:
config_train = yaml.load(f_config_train, Loader=yaml.FullLoader)
config_train_copy = deepcopy(config_train)
# [data]
config_data = {"to_undirected" : False,
"transductive" : False,
"norm_feat" : True}
config_data.update(config_train['data'])
# [arch]
arch_gnn = { # default values
"dim" : -1,
"aggr" : "sage",
"residue" : "none",
"pooling" : "center",
"loss" : "softmax",
"num_layers" : -1,
"act" : "I",
"heads" : -1,
"feature_augment" : "hops",
"feature_smoothen" : "none",
"label_smoothen" : "none", # label_smoothen is only considered if use_label != none
"ensemble_act" : "leakyrelu",
"branch_sharing" : False,
"use_label" : "none"
}
arch_gnn.update(config_train["architecture"])
assert arch_gnn['aggr'] in ['sage', 'gat', 'gatscat', 'gcn', 'mlp', 'gin', 'sgc', 'sign']
assert arch_gnn['use_label'].lower() in ['all', 'none', 'no_valid']
assert arch_gnn['pooling'].lower().split('-')[0] in ['mean', 'max', 'sum', 'center', 'sort']
assert arch_gnn['residue'].lower() in ['sum', 'concat', 'max', 'none']
assert arch_gnn['feature_augment'].lower() in ['hops', 'ppr', 'none']
if arch_gnn["feature_augment"] and arch_gnn["feature_augment"].lower() != "none":
arch_gnn["feature_augment"] = set(k for k in arch_gnn["feature_augment"].split("-"))
else:
arch_gnn['feature_augment'] = set()
# [params]
params_train = {
"lr" : 0.01,
"dropedge" : 0.0,
"ensemble_dropout" : "none"
}
params_train.update(config_train["hyperparameter"])
params_train["lr"] = float(params_train["lr"])
# [sampler]
sampler_preproc, sampler_train = [], []
for s in config_train['sampler']:
phase = s.pop('phase')
if phase == 'preprocess':
sampler_preproc.append(s)
elif phase == 'train':
sampler_train.append(s)
else:
raise NotImplementedError
batch_size = config_train["hyperparameter"]["batch_size"]
config_sampler_preproc = {"batch_size": batch_size, "configs": sampler_preproc}
config_sampler_train = {"batch_size": batch_size, "configs": sampler_train}
# add self-edges for certain arch. e.g., for GAT, will be divide-by-0 error in grad without self-edges
if arch_gnn["aggr"] in ["gcn", "gat", "gatscat"]:
for sc in config_sampler_train["configs"]:
num_ens = [len(v) for k, v in sc.items() if k != 'method']
assert max(num_ens) == min(num_ens)
sc["add_self_edge"] = [True] * num_ens[0]
# [copy yml]
name_key = f"{arch_gnn['aggr']}_{arch_gnn['num_layers']}"
dir_log_full = log_dir(task, config_train_copy, name_key, dir_log, name_graph, git_rev, timestamp)
return params_train, config_sampler_preproc, config_sampler_train, config_data, arch_gnn, dir_log_full
def parse_n_prepare_postproc(dir_load, f_config, name_graph, dir_log, arch_gnn, logger):
if f_config is not None:
with open(f_config) as f:
config_postproc = yaml.load(f, Loader=yaml.FullLoader)
name_key = f"postproc-{arch_gnn['aggr']}_{arch_gnn['num_layers']}"
log_dir('postproc', config_postproc, name_key, dir_log, name_graph, git_rev, timestamp)
skip_instantiate = []
if 'check_record' in config_postproc:
load_acc_record = config_postproc['check_record']
else:
load_acc_record = True
if config_postproc['method'] == 'cs': # C&S
acc_record = [] if load_acc_record else None
if dir_load is not None:
if 'dir_pred_mat' not in config_postproc:
config_postproc['dir_pred_mat'] = [dir_load]
elif os.path.realpath(dir_load) not in [os.path.realpath(pc) for pc in config_postproc['dir_pred_mat']]:
config_postproc['dir_pred_mat'].append(dir_load)
config_postproc['pred_mat'] = [None] * len(config_postproc['dir_pred_mat'])
for i, di in enumerate(config_postproc['dir_pred_mat']):
if load_acc_record:
acc_record.append(logger.decode_csv('final', di))
for f in os.listdir(di):
if 'cs' == f.split('.')[-1] and f.startswith('pred_mat'):
config_postproc['pred_mat'][i] = torch.load(f"{di}/{f}")
break
if all(m is not None for m in config_postproc['pred_mat']):
skip_instantiate = ['data', 'model']
elif config_postproc['method'] == 'ensemble': # Variant of subgraph ensemble as postproc
acc_record = {s: [] for s in config_postproc['dir_emb_mat']} if load_acc_record else None
assert dir_load is None
config_postproc['emb_mat'] = {k: [None] * len(v) for k, v in config_postproc['dir_emb_mat'].items()}
for sname, dirs_l in config_postproc['dir_emb_mat'].items():
for i, di in enumerate(dirs_l):
if load_acc_record:
acc_record[sname].append(logger.decode_csv('final', di))
for f in os.listdir(di):
if 'ens' == f.split('.')[-1] and f.startswith('emb_mat'):
config_postproc['emb_mat'][sname][i] = torch.load(f"{di}/{f}")
break
if all(m is not None for s, mat_l in config_postproc['emb_mat'].items() for m in mat_l):
skip_instantiate = ['model'] # you have to load data (role, labels) anyways
return config_postproc, acc_record, skip_instantiate
def log_dir(task, config_new, yml_name_key, dir_log, name_graph, git_rev, timestamp):
if task == 'train':
prefix = 'running'
elif task == 'inference':
prefix = 'INF'
elif task == 'postproc':
prefix = 'POST'
else:
raise NotImplementedError
log_dir = f"{dir_log}/{name_graph}/{prefix}/{timestamp}-{git_rev.strip():s}/"
if not os.path.exists(log_dir):
os.makedirs(log_dir)
yml_file = f"{log_dir}/{yml_name_key}.yml"
with open(yml_file, 'w') as f:
yaml.dump(config_new, f, default_flow_style=False, sort_keys=False)
return log_dir
# =============== #
# ADJ UTILS #
# =============== #
def get_deg_torch_sparse(adj):
return scatter(adj._values(), adj._indices()[0], reduce="sum")
def adj_norm_rw(adj, deg=None, dropedge=0., sort_indices=True):
if type(adj) == torch.Tensor:
assert deg is None
assert torch.sum(adj._values()).cpu().long().item() == adj._values().size()[0]
_deg_orig = get_deg_torch_sparse(adj)
if dropedge > 0:
masked_indices = torch.floor(torch.rand(int(adj._values().size()[0] * dropedge)) * adj._values().size()[0]).long()
adj._values()[masked_indices] = 0
_deg_dropped = get_deg_torch_sparse(adj)
else:
_deg_dropped = _deg_orig
_deg = torch.repeat_interleave(_deg_dropped, _deg_orig.long())
_deg = torch.clamp(_deg, min=1)
_val = adj._values()
_val /= _deg
adj_norm = adj
else:
assert dropedge == 0., "not supporting dropedge for scipy csr matrices"
assert adj.shape[0] == adj.shape[1]
diag_shape = (adj.shape[0], adj.shape[1])
D = adj.sum(1).flatten() if deg is None else deg
D = np.clip(D, 1, None) # if deg_v == 0, it doesn't matter what value we clip it to.
norm_diag = sp.dia_matrix((1 / D, 0), shape=diag_shape)
adj_norm = norm_diag.dot(adj)
if sort_indices:
adj_norm.sort_indices()
return adj_norm
def adj_norm_sym(adj, sort_indices=True, add_self_edge=False, dropedge=0.):
assert adj.shape[0] == adj.shape[1]
assert adj.data.sum() == adj.size, "symmetric normalization only supports binary input adj"
N = adj.shape[0]
if dropedge > 0:
masked_indices = np.random.choice(adj.size, int(adj.size * dropedge))
adj.data[masked_indices] = 0
adjT = adj.tocsc()
data_add = adj.data + adjT.data
survived_indices = np.where(data_add == 2)[0]
adj.data *= 0
adj.data[survived_indices] = 1
if add_self_edge:
indptr_new = np.zeros(N + 1)
neigh_list = [set(adj.indices[adj.indptr[v] : adj.indptr[v+1]]) for v in range(N)]
for i in range(len(neigh_list)):
neigh_list[i].add(i)
neigh_list[i] = np.sort(np.fromiter(neigh_list[i], int, len(neigh_list[i])))
indptr_new[i + 1] = neigh_list[i].size
indptr_new = indptr_new.cumsum()
indices_new = np.concatenate(neigh_list)
data_new = np.broadcast_to(np.ones(1), indices_new.size)
adj_aug = sp.csr_matrix((data_new, indices_new, indptr_new), shape=adj.shape)
else:
adj_aug = adj
D = np.clip(adj_aug.sum(1).flatten(), 1, None)
norm_diag = sp.dia_matrix((np.power(D, -0.5), 0), shape=adj_aug.shape)
adj_norm = norm_diag.dot(adj_aug).dot(norm_diag)
if sort_indices:
adj_norm.sort_indices()
return adj_norm
def coo_scipy2torch(adj):
values = adj.data
indices = np.vstack((adj.row, adj.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
return torch.sparse.FloatTensor(i, v, torch.Size(adj.shape))
def load_adj(prefix, dataset, type_, split_):
assert split_ in ['full', 'train'], "UNKNOWN ADJ SPLIT. ONLY ACCEPT [full] or [train]"
assert type_ in ['raw', 'undirected'], "UNKNOWN ADJ TYPE. ONLY ACCEPT [raw] or [undirected]"
file_adj = f"{prefix}/{dataset}/adj_{split_}_{type_}." + "{}"
if os.path.isfile(file_adj.format('npz')):
adj = sp.load_npz(file_adj.format('npz'))
elif os.path.isfile(file_adj.format('npy')):
adj_d = np.load(file_adj.format('npy'), allow_pickle=True)
if type(adj_d) == np.ndarray:
adj_d = adj_d[()]
else:
assert type(adj_d) == dict
indptr = adj_d['indptr']
indices = adj_d['indices']
if 'data' in adj_d:
data = adj_d['data']
else:
data = np.broadcast_to(np.ones(1, dtype=np.bool), indices.size)
num_nodes = indptr.size - 1
adj = sp.csr_matrix((data, indices, indptr), shape=(num_nodes, num_nodes))
else:
adj = None
return adj
def validate_bin_file(bin_adj_files):
for md, df in bin_adj_files.items():
assert set(df.keys()) == set(['indptr', 'indices', 'data'])
if not os.path.isfile(df['indptr']) or not os.path.isfile(df['indices']):
return {mmd: None for mmd in bin_adj_files}
if not os.path.isfile(df['data']):
df['data'] = ''
return bin_adj_files
def merge_stat_record(dict_l : List[dict]):
key_l = [set(d.keys()) for d in dict_l]
assert all(k == key_l[0] == set([TRAIN, VALID, TEST]) for k in key_l)
names_stat = set(dict_l[0][TRAIN].keys())
ret = {n: {TRAIN: [], VALID: [], TEST: []} for n in names_stat}
for d in dict_l:
for m in [TRAIN, VALID, TEST]:
assert set(d[m].keys()) == names_stat
for k, v in d[m].items():
ret[k][m].append(v)
return ret | true | true |
f73e36bcec7cc22252107d2df53a3249c19af8ba | 104 | py | Python | poroto/test.py | TANGO-Project/poroto | 380c0ab9f33bead70ed71c78493e682924d7f997 | [
"BSD-3-Clause"
] | 1 | 2018-05-22T22:53:31.000Z | 2018-05-22T22:53:31.000Z | poroto/test.py | TANGO-Project/poroto | 380c0ab9f33bead70ed71c78493e682924d7f997 | [
"BSD-3-Clause"
] | null | null | null | poroto/test.py | TANGO-Project/poroto | 380c0ab9f33bead70ed71c78493e682924d7f997 | [
"BSD-3-Clause"
] | null | null | null | from collections import namedtuple
TestVector=namedtuple('TestVector', ['test_points', 'test_vectors'])
| 34.666667 | 68 | 0.807692 | from collections import namedtuple
TestVector=namedtuple('TestVector', ['test_points', 'test_vectors'])
| true | true |
f73e37d400fe7539be75a6acab12504b60a90309 | 319 | py | Python | models.py | budica-vasilica/webapp | f5685f859e925d8d98e61c256afbcd8e7ca238bc | [
"Apache-2.0"
] | null | null | null | models.py | budica-vasilica/webapp | f5685f859e925d8d98e61c256afbcd8e7ca238bc | [
"Apache-2.0"
] | null | null | null | models.py | budica-vasilica/webapp | f5685f859e925d8d98e61c256afbcd8e7ca238bc | [
"Apache-2.0"
] | null | null | null | from flask_login import UserMixin
from __init__ import db
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True) # primary keys are required by SQLAlchemy
email = db.Column(db.String(100), unique=True)
password = db.Column(db.String(100))
name = db.Column(db.String(1000)) | 39.875 | 91 | 0.714734 | from flask_login import UserMixin
from __init__ import db
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(100), unique=True)
password = db.Column(db.String(100))
name = db.Column(db.String(1000)) | true | true |
f73e382358074824f39a37c005f957e13a773d96 | 814 | py | Python | cogs/wow/config.py | kennethntnguyen/wow_news_bot | da9086f7ebd0acbcda0d9e5ff44fa0837aa161f1 | [
"MIT"
] | null | null | null | cogs/wow/config.py | kennethntnguyen/wow_news_bot | da9086f7ebd0acbcda0d9e5ff44fa0837aa161f1 | [
"MIT"
] | null | null | null | cogs/wow/config.py | kennethntnguyen/wow_news_bot | da9086f7ebd0acbcda0d9e5ff44fa0837aa161f1 | [
"MIT"
] | null | null | null | import os
mongodb_atlas = {
"connection_string": os.environ.get('MONGODB_CONNECTION_STRING'),
"database_name": "info-bot",
"news_collection_name": "wow-news",
"log_collections": {"commands": "user-commands-log", "updater": "push-updates-log"}
}
article_types = {
"HOTFIXES": "hotfixes",
"LATEST": "latest"
}
article_keys = {
"TYPE": "type",
"ID": "_id",
"TITLE": "title",
"DESCRIPTION": "description",
"DATETIME": "datetime",
"URL": "url",
"IMAGE_URL": "image_url"
}
news_cog = {
"embed_color": {
"r": 252,
"g": 186,
"b": 3
}
}
updater_cog = {
"news_channel_id": 823082892367364156,
"wow_role_id": 742188088461099148,
"refresh_rate_seconds": 5,
"embed_color": {
"r": 255,
"g": 75,
"b": 35
}
}
| 18.930233 | 87 | 0.568796 | import os
mongodb_atlas = {
"connection_string": os.environ.get('MONGODB_CONNECTION_STRING'),
"database_name": "info-bot",
"news_collection_name": "wow-news",
"log_collections": {"commands": "user-commands-log", "updater": "push-updates-log"}
}
article_types = {
"HOTFIXES": "hotfixes",
"LATEST": "latest"
}
article_keys = {
"TYPE": "type",
"ID": "_id",
"TITLE": "title",
"DESCRIPTION": "description",
"DATETIME": "datetime",
"URL": "url",
"IMAGE_URL": "image_url"
}
news_cog = {
"embed_color": {
"r": 252,
"g": 186,
"b": 3
}
}
updater_cog = {
"news_channel_id": 823082892367364156,
"wow_role_id": 742188088461099148,
"refresh_rate_seconds": 5,
"embed_color": {
"r": 255,
"g": 75,
"b": 35
}
}
| true | true |
f73e3954a7d580c39496f63b075b45618b77b96c | 1,693 | py | Python | 3_ gph-low-pass-filter.py | dmuehlemann/RPGV | 18b4216e6cedce40a020a57e1822a363a8a6b60c | [
"MIT"
] | null | null | null | 3_ gph-low-pass-filter.py | dmuehlemann/RPGV | 18b4216e6cedce40a020a57e1822a363a8a6b60c | [
"MIT"
] | 2 | 2021-12-14T11:54:18.000Z | 2021-12-15T10:03:37.000Z | 3_ gph-low-pass-filter.py | dmuehlemann/RPGV | 18b4216e6cedce40a020a57e1822a363a8a6b60c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sam Aug 7 11:50:05 2020
@author: Dirk
This scripts applies a 10day low pass filter to the ERA5 gph daily means
"""
import scipy.signal as signal
import matplotlib.pyplot as plt
from pathlib import Path
import xarray as xr
#Define input and output data
data_folder = Path("../data/")
filename = data_folder / 'gph-daily-mean.nc'
data_out = data_folder / 'gph-daily-mean-lowpass_2_0-1.nc'
fig_out = data_folder / 'fig/gph-daily-mean-lowpass_2_0-1.png'
#Load data
z_all = xr.open_dataset(filename)
# First, design the Buterworth filter
N = 2 # Filter order
Wn = 0.1 # Cutoff frequency
B, A = signal.butter(N, Wn, output='ba')
# temp = z_all.isel(latitude=10, longitude=10).z.loc["2000-01-01":"2005-01-01"]
# Second, apply the filter
z_allf = xr.apply_ufunc(
signal.filtfilt, B, A, z_all,
kwargs=dict(
axis=0,
)
)
# Make plots
d = 10000
a=10150
b=100
c=150
for i in range(0,10):
fig = plt.figure()
ax1 = fig.add_subplot(211)
plt.plot(z_all.z[d:a, b, c], 'b-')
plt.plot(z_allf.z[d:a, b, c], 'r-',)
plt.ylabel("Geopotential height")
plt.legend(['Original','Filtered'])
plt.title("4-day lowpass filtered geopotential height")
ax1.axes.get_xaxis().set_visible(False)
ax1 = fig.add_subplot(212)
plt.plot(z_all.z[d:a, b, c]-z_allf.z[d:a, b, c], 'b-')
plt.ylabel("Geopotential height")
plt.xlabel("Days")
plt.legend(['Residuals'])
name= 'fig/filter/gph-daily-mean-lowpass_2_0-25_150d'+str(i)+'.png'
a = a +5
b = b +5
c = c+5
d = d +5
fig.savefig(data_folder / name)
#save results and plot
# z_allf.to_netcdf(data_out)
# fig.savefig(fig_out)
| 21.705128 | 79 | 0.653869 |
import scipy.signal as signal
import matplotlib.pyplot as plt
from pathlib import Path
import xarray as xr
data_folder = Path("../data/")
filename = data_folder / 'gph-daily-mean.nc'
data_out = data_folder / 'gph-daily-mean-lowpass_2_0-1.nc'
fig_out = data_folder / 'fig/gph-daily-mean-lowpass_2_0-1.png'
z_all = xr.open_dataset(filename)
N = 2
Wn = 0.1
B, A = signal.butter(N, Wn, output='ba')
z_allf = xr.apply_ufunc(
signal.filtfilt, B, A, z_all,
kwargs=dict(
axis=0,
)
)
d = 10000
a=10150
b=100
c=150
for i in range(0,10):
fig = plt.figure()
ax1 = fig.add_subplot(211)
plt.plot(z_all.z[d:a, b, c], 'b-')
plt.plot(z_allf.z[d:a, b, c], 'r-',)
plt.ylabel("Geopotential height")
plt.legend(['Original','Filtered'])
plt.title("4-day lowpass filtered geopotential height")
ax1.axes.get_xaxis().set_visible(False)
ax1 = fig.add_subplot(212)
plt.plot(z_all.z[d:a, b, c]-z_allf.z[d:a, b, c], 'b-')
plt.ylabel("Geopotential height")
plt.xlabel("Days")
plt.legend(['Residuals'])
name= 'fig/filter/gph-daily-mean-lowpass_2_0-25_150d'+str(i)+'.png'
a = a +5
b = b +5
c = c+5
d = d +5
fig.savefig(data_folder / name)
| true | true |
f73e39aa80484ff1d7ac6529844e14a2ec22c427 | 828 | py | Python | tasks.py | tommccoy1/tpdn | a4ea54030056a49e5fd00a700eb71790157bc697 | [
"MIT"
] | 18 | 2018-12-25T21:03:02.000Z | 2022-01-07T17:56:20.000Z | tasks.py | tommccoy1/tpdn | a4ea54030056a49e5fd00a700eb71790157bc697 | [
"MIT"
] | null | null | null | tasks.py | tommccoy1/tpdn | a4ea54030056a49e5fd00a700eb71790157bc697 | [
"MIT"
] | 3 | 2019-02-15T17:55:30.000Z | 2022-01-31T19:10:06.000Z | from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
from random import shuffle
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import sys
import os
import time
import math
import pickle
# Provide the predefined digit sequence tasks
def interleaved(sequence):
    """Return a list alternating elements from the front and back of *sequence*.

    E.g. [1, 2, 3, 4] -> [1, 4, 2, 3]. A middle element (odd length) comes
    last; an empty sequence yields an empty list.
    """
    result = []
    lo, hi = 0, len(sequence) - 1
    while lo < hi:
        result.append(sequence[lo])
        result.append(sequence[hi])
        lo += 1
        hi -= 1
    if lo == hi:  # odd length: one untouched middle element remains
        result.append(sequence[lo])
    return result
def transform(sequence, task):
    """Apply the named digit-sequence task to *sequence*.

    Supported tasks:
      * "auto"       -- identity (autoencoding)
      * "rev"        -- reversal
      * "sort"       -- ascending sort
      * "interleave" -- alternate front/back elements (see ``interleaved``)

    Raises:
        ValueError: if *task* is not one of the supported names. Previously
        an unknown task silently returned None, deferring the failure to
        wherever the result was first used.
    """
    if task == "auto":
        return sequence
    if task == "rev":
        return sequence[::-1]
    if task == "sort":
        return sorted(sequence)
    if task == "interleave":
        return interleaved(sequence)
    raise ValueError("unknown task: %r" % (task,))
| 20.195122 | 72 | 0.705314 | from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
from random import shuffle
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import sys
import os
import time
import math
import pickle
def interleaved(sequence):
if len(sequence) <= 1:
return list(sequence)
else:
return [sequence[0], sequence[-1]] + interleaved(sequence[1:-1])
def transform(sequence, task):
if task == "auto":
return sequence
if task == "rev":
return sequence[::-1]
if task == "sort":
return sorted(sequence)
if task == "interleave":
return interleaved(sequence)
| true | true |
f73e3bdf82ffa4be8011152eb9af731e09bab5ec | 516 | py | Python | tesla/config.py | fniewijk/tesla-simulator-server | c568592e74717416f99f03d91c1cedb7e36c6663 | [
"MIT"
] | null | null | null | tesla/config.py | fniewijk/tesla-simulator-server | c568592e74717416f99f03d91c1cedb7e36c6663 | [
"MIT"
] | null | null | null | tesla/config.py | fniewijk/tesla-simulator-server | c568592e74717416f99f03d91c1cedb7e36c6663 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
class Config(object):
    """Flask-style configuration object for the Tesla simulator server."""
    DEBUG = True
    HOST = '0.0.0.0'
    # Bug fix: os.getenv returns a *string* when TESLA_PORT is set in the
    # environment but the default here is an int; normalise so PORT is
    # always an int regardless of where it came from.
    PORT = int(os.getenv('TESLA_PORT', 8000))
    # Raw-byte secret used for session signing.
    SECRET_KEY = (
        '\xc85\x95\x9a\x80\xc1\x93\xd0\xe9\x95\x08\xfb\xbe\x85'
        '\xd0\x1aq\xd3\x95\xc9\xad \xc0\x08'
    )
#http://docs.timdorr.apiary.io/#reference/authentication/tokens
#TESLA_CLIENT_ID=e4a9949fcfa04068f59abb5a658f2bac0a3428e4652315490b659d5ab3f35a9e
#TESLA_CLIENT_SECRET=c75f14bbadc8bee3a7594412c31⁄416f8300256d7668ea7e6e7f06727bfb9d220
| 32.25 | 90 | 0.71124 |
import os
class Config(object):
DEBUG = True
HOST = '0.0.0.0'
PORT = os.getenv('TESLA_PORT', 8000)
SECRET_KEY = (
'\xc85\x95\x9a\x80\xc1\x93\xd0\xe9\x95\x08\xfb\xbe\x85'
'\xd0\x1aq\xd3\x95\xc9\xad \xc0\x08'
)
| true | true |
f73e3cacfc48dcca53767e34827baca54ab71500 | 11,888 | py | Python | tests/aiohttp/test_aiohttp_simple_api.py | Svendegroote91/connexion | 26843462d9b80e25bd458b7ce8c246ac94c3bf4e | [
"Apache-2.0"
] | 1 | 2019-05-17T11:01:15.000Z | 2019-05-17T11:01:15.000Z | tests/aiohttp/test_aiohttp_simple_api.py | Svendegroote91/connexion | 26843462d9b80e25bd458b7ce8c246ac94c3bf4e | [
"Apache-2.0"
] | null | null | null | tests/aiohttp/test_aiohttp_simple_api.py | Svendegroote91/connexion | 26843462d9b80e25bd458b7ce8c246ac94c3bf4e | [
"Apache-2.0"
] | 4 | 2019-07-09T02:30:48.000Z | 2021-12-10T17:36:40.000Z | import asyncio
import sys
import pytest
import yaml
import aiohttp.web
from conftest import TEST_FOLDER
from connexion import AioHttpApp
try:
import ujson as json
except ImportError:
import json
@pytest.fixture
def aiohttp_app(aiohttp_api_spec_dir):
    """Connexion AioHttp app serving the simple spec with response validation on."""
    app = AioHttpApp(__name__, port=5001,
                     specification_dir=aiohttp_api_spec_dir,
                     debug=True)
    # validate_responses is passed both directly and via options; handlers can
    # receive the raw aiohttp request through the 'request_ctx' argument.
    options = {"validate_responses": True}
    app.add_api('swagger_simple.yaml', validate_responses=True, pass_context_arg_name='request_ctx', options=options)
    return app
@asyncio.coroutine
def test_app(aiohttp_app, aiohttp_client):
    """A plain GET route on the fixture app returns the handler's body."""
    # Create the app and run the test_app testcase below.
    app_client = yield from aiohttp_client(aiohttp_app.app)
    get_bye = yield from app_client.get('/v1.0/bye/jsantos')
    assert get_bye.status == 200
    assert (yield from get_bye.read()) == b'Goodbye jsantos'
@asyncio.coroutine
def test_app_with_relative_path(aiohttp_api_spec_dir, aiohttp_client):
    """The app also resolves a specification_dir given as a relative path."""
    # Create the app with a relative path and run the test_app testcase below.
    app = AioHttpApp(__name__, port=5001,
                     specification_dir='..' /
                     aiohttp_api_spec_dir.relative_to(TEST_FOLDER),
                     debug=True)
    app.add_api('swagger_simple.yaml')
    app_client = yield from aiohttp_client(app.app)
    get_bye = yield from app_client.get('/v1.0/bye/jsantos')
    assert get_bye.status == 200
    assert (yield from get_bye.read()) == b'Goodbye jsantos'
@asyncio.coroutine
def test_swagger_json(aiohttp_api_spec_dir, aiohttp_client):
    """ Verify the swagger.json file is returned for default setting passed to app. """
    app = AioHttpApp(__name__, port=5001,
                     specification_dir=aiohttp_api_spec_dir,
                     debug=True)
    api = app.add_api('swagger_simple.yaml')
    app_client = yield from aiohttp_client(app.app)
    swagger_json = yield from app_client.get('/v1.0/swagger.json')
    json_ = yield from swagger_json.read()
    assert swagger_json.status == 200
    # The served document must round-trip to the raw (unresolved) spec dict.
    assert api.specification.raw == json.loads(json_)
@asyncio.coroutine
def test_swagger_yaml(aiohttp_api_spec_dir, aiohttp_client):
    """ Verify the swagger.yaml file is returned for default setting passed to app. """
    app = AioHttpApp(__name__, port=5001,
                     specification_dir=aiohttp_api_spec_dir,
                     debug=True)
    api = app.add_api('swagger_simple.yaml')
    app_client = yield from aiohttp_client(app.app)
    spec_response = yield from app_client.get('/v1.0/swagger.yaml')
    data_ = yield from spec_response.read()
    assert spec_response.status == 200
    # yaml.load without an explicit Loader is deprecated (PyYAML >= 5.1) and
    # unsafe on untrusted input; safe_load parses a plain spec document fine.
    assert api.specification.raw == yaml.safe_load(data_)
@asyncio.coroutine
def test_no_swagger_json(aiohttp_api_spec_dir, aiohttp_client):
    """ Verify the swagger.json file is not returned when set to False when creating app. """
    options = {"swagger_json": False}
    app = AioHttpApp(__name__, port=5001,
                     specification_dir=aiohttp_api_spec_dir,
                     options=options,
                     debug=True)
    app.add_api('swagger_simple.yaml')
    app_client = yield from aiohttp_client(app.app)
    swagger_json = yield from app_client.get('/v1.0/swagger.json')  # aiohttp client response
    assert swagger_json.status == 404
@asyncio.coroutine
def test_no_swagger_yaml(aiohttp_api_spec_dir, aiohttp_client):
    """ Verify the swagger.json file is not returned when set to False when creating app. """
    options = {"swagger_json": False}
    app = AioHttpApp(__name__, port=5001,
                     specification_dir=aiohttp_api_spec_dir,
                     options=options,
                     debug=True)
    app.add_api('swagger_simple.yaml')
    app_client = yield from aiohttp_client(app.app)
    spec_response = yield from app_client.get('/v1.0/swagger.yaml')  # aiohttp client response
    assert spec_response.status == 404
@asyncio.coroutine
def test_swagger_ui(aiohttp_api_spec_dir, aiohttp_client):
    """/ui redirects to /ui/ and both serve a page pointing at swagger.json."""
    app = AioHttpApp(__name__, port=5001,
                     specification_dir=aiohttp_api_spec_dir,
                     debug=True)
    app.add_api('swagger_simple.yaml')
    app_client = yield from aiohttp_client(app.app)
    swagger_ui = yield from app_client.get('/v1.0/ui')
    assert swagger_ui.status == 200
    # url.path reflects the redirect target the client followed.
    assert swagger_ui.url.path == '/v1.0/ui/'
    assert b'url = "/v1.0/swagger.json"' in (yield from swagger_ui.read())
    swagger_ui = yield from app_client.get('/v1.0/ui/')
    assert swagger_ui.status == 200
    assert b'url = "/v1.0/swagger.json"' in (yield from swagger_ui.read())
@asyncio.coroutine
def test_swagger_ui_index(aiohttp_api_spec_dir, aiohttp_client):
    """The explicit index.html also serves the UI page."""
    app = AioHttpApp(__name__, port=5001,
                     specification_dir=aiohttp_api_spec_dir,
                     debug=True)
    app.add_api('swagger_simple.yaml')
    app_client = yield from aiohttp_client(app.app)
    swagger_ui = yield from app_client.get('/v1.0/ui/index.html')
    assert swagger_ui.status == 200
    assert b'url = "/v1.0/swagger.json"' in (yield from swagger_ui.read())
@asyncio.coroutine
def test_swagger_ui_static(aiohttp_api_spec_dir, aiohttp_client):
    """Static UI assets (JS files) are served alongside the UI page."""
    app = AioHttpApp(__name__, port=5001,
                     specification_dir=aiohttp_api_spec_dir,
                     debug=True)
    app.add_api('swagger_simple.yaml')
    app_client = yield from aiohttp_client(app.app)
    swagger_ui = yield from app_client.get('/v1.0/ui/lib/swagger-oauth.js')
    assert swagger_ui.status == 200
    app_client = yield from aiohttp_client(app.app)
    swagger_ui = yield from app_client.get('/v1.0/ui/swagger-ui.min.js')
    assert swagger_ui.status == 200
@asyncio.coroutine
def test_no_swagger_ui(aiohttp_api_spec_dir, aiohttp_client):
    """Swagger UI must 404 when disabled via app options or per-API options."""
    options = {"swagger_ui": False}
    app = AioHttpApp(__name__, port=5001,
                     specification_dir=aiohttp_api_spec_dir,
                     options=options, debug=True)
    app.add_api('swagger_simple.yaml')
    app_client = yield from aiohttp_client(app.app)
    swagger_ui = yield from app_client.get('/v1.0/ui/')
    assert swagger_ui.status == 404
    # Second case: swagger_ui disabled at the add_api (per-API) level.
    app2 = AioHttpApp(__name__, port=5001,
                      specification_dir=aiohttp_api_spec_dir,
                      debug=True)
    options = {"swagger_ui": False}
    app2.add_api('swagger_simple.yaml', options=options)
    # Bug fix: the client must be built from app2 -- previously it was built
    # from the first app again, so the per-API option was never exercised.
    app2_client = yield from aiohttp_client(app2.app)
    swagger_ui2 = yield from app2_client.get('/v1.0/ui/')
    assert swagger_ui2.status == 404
@asyncio.coroutine
def test_middlewares(aiohttp_api_spec_dir, aiohttp_client):
    """A user-supplied aiohttp middleware passed via options wraps every response."""
    @asyncio.coroutine
    def middleware(app, handler):
        # Old-style (factory) aiohttp middleware appending a marker to bodies.
        @asyncio.coroutine
        def middleware_handler(request):
            response = (yield from handler(request))
            response.body += b' middleware'
            return response
        return middleware_handler
    options = {"middlewares": [middleware]}
    app = AioHttpApp(__name__, port=5001,
                     specification_dir=aiohttp_api_spec_dir,
                     debug=True, options=options)
    app.add_api('swagger_simple.yaml')
    app_client = yield from aiohttp_client(app.app)
    get_bye = yield from app_client.get('/v1.0/bye/jsantos')
    assert get_bye.status == 200
    assert (yield from get_bye.read()) == b'Goodbye jsantos middleware'
@asyncio.coroutine
def test_response_with_str_body(aiohttp_app, aiohttp_client):
    """A handler returning a plain str is sent through unchanged."""
    # Create the app and run the test_app testcase below.
    app_client = yield from aiohttp_client(aiohttp_app.app)
    get_bye = yield from app_client.get('/v1.0/aiohttp_str_response')
    assert get_bye.status == 200
    assert (yield from get_bye.read()) == b'str response'
@asyncio.coroutine
def test_response_with_non_str_and_non_json_body(aiohttp_app, aiohttp_client):
    """A non-str, non-JSON return value (e.g. an int) is serialized to its text form."""
    app_client = yield from aiohttp_client(aiohttp_app.app)
    get_bye = yield from app_client.get(
        '/v1.0/aiohttp_non_str_non_json_response'
    )
    assert get_bye.status == 200
    assert (yield from get_bye.read()) == b'1234'
@asyncio.coroutine
def test_response_with_bytes_body(aiohttp_app, aiohttp_client):
    """A handler returning bytes is sent through unchanged."""
    # Create the app and run the test_app testcase below.
    app_client = yield from aiohttp_client(aiohttp_app.app)
    get_bye = yield from app_client.get('/v1.0/aiohttp_bytes_response')
    assert get_bye.status == 200
    assert (yield from get_bye.read()) == b'bytes response'
@asyncio.coroutine
def test_validate_responses(aiohttp_app, aiohttp_client):
    """With validate_responses on, a schema-conforming JSON body passes through."""
    app_client = yield from aiohttp_client(aiohttp_app.app)
    get_bye = yield from app_client.get('/v1.0/aiohttp_validate_responses')
    assert get_bye.status == 200
    assert (yield from get_bye.read()) == b'{"validate": true}'
@asyncio.coroutine
def test_get_users(aiohttp_client, aiohttp_app):
    """GET /users redirects to the trailing-slash route and returns the fixture users."""
    app_client = yield from aiohttp_client(aiohttp_app.app)
    resp = yield from app_client.get('/v1.0/users')
    assert resp.url.path == '/v1.0/users/'  # followed redirect
    assert resp.status == 200
    json_data = yield from resp.json()
    assert json_data == \
           [{'name': 'John Doe', 'id': 1}, {'name': 'Nick Carlson', 'id': 2}]
@asyncio.coroutine
def test_create_user(aiohttp_client, aiohttp_app):
    """POSTing a JSON user body yields 201 Created."""
    app_client = yield from aiohttp_client(aiohttp_app.app)
    user = {'name': 'Maksim'}
    resp = yield from app_client.post('/v1.0/users', json=user, headers={'Content-type': 'application/json'})
    assert resp.status == 201
@asyncio.coroutine
def test_access_request_context(aiohttp_client, aiohttp_app):
    """The handler wired with pass_context_arg_name receives the raw request (204)."""
    app_client = yield from aiohttp_client(aiohttp_app.app)
    resp = yield from app_client.post('/v1.0/aiohttp_access_request_context/')
    assert resp.status == 204
@asyncio.coroutine
def test_query_parsing_simple(aiohttp_client, aiohttp_app):
    """A scalar query parameter is echoed back as-is."""
    expected_query = 'query'
    app_client = yield from aiohttp_client(aiohttp_app.app)
    resp = yield from app_client.get(
        '/v1.0/aiohttp_query_parsing_str',
        params={
            'query': expected_query,
        },
    )
    assert resp.status == 200
    json_data = yield from resp.json()
    assert json_data == {'query': expected_query}
@asyncio.coroutine
def test_query_parsing_array(aiohttp_client, aiohttp_app):
    """A comma-separated (csv collectionFormat) array parameter is split into a list."""
    expected_query = ['queryA', 'queryB']
    app_client = yield from aiohttp_client(aiohttp_app.app)
    resp = yield from app_client.get(
        '/v1.0/aiohttp_query_parsing_array',
        params={
            'query': ','.join(expected_query),
        },
    )
    assert resp.status == 200
    json_data = yield from resp.json()
    assert json_data == {'query': expected_query}
@asyncio.coroutine
def test_query_parsing_array_multi(aiohttp_client, aiohttp_app):
    """A repeated (multi collectionFormat) query parameter collects all values."""
    expected_query = ['queryA', 'queryB', 'queryC']
    # Build query=...&query=...&query=... by hand; params= would collapse it.
    query_str = '&'.join(['query=%s' % q for q in expected_query])
    app_client = yield from aiohttp_client(aiohttp_app.app)
    resp = yield from app_client.get(
        '/v1.0/aiohttp_query_parsing_array_multi?%s' % query_str,
    )
    assert resp.status == 200
    json_data = yield from resp.json()
    assert json_data == {'query': expected_query}
# async-def handlers only parse on Python 3.5+, so the fixture and its test
# live behind this version gate.
if sys.version_info[0:2] >= (3, 5):
    @pytest.fixture
    def aiohttp_app_async_def(aiohttp_api_spec_dir):
        """App whose spec routes to ``async def`` handlers, with validation on."""
        app = AioHttpApp(__name__, port=5001,
                         specification_dir=aiohttp_api_spec_dir,
                         debug=True)
        app.add_api('swagger_simple_async_def.yaml', validate_responses=True)
        return app
    @asyncio.coroutine
    def test_validate_responses_async_def(aiohttp_app_async_def, aiohttp_client):
        """Response validation also works for async-def handlers."""
        app_client = yield from aiohttp_client(aiohttp_app_async_def.app)
        get_bye = yield from app_client.get('/v1.0/aiohttp_validate_responses')
        assert get_bye.status == 200
        assert (yield from get_bye.read()) == b'{"validate": true}'
| 35.807229 | 117 | 0.689603 | import asyncio
import sys
import pytest
import yaml
import aiohttp.web
from conftest import TEST_FOLDER
from connexion import AioHttpApp
try:
import ujson as json
except ImportError:
import json
@pytest.fixture
def aiohttp_app(aiohttp_api_spec_dir):
app = AioHttpApp(__name__, port=5001,
specification_dir=aiohttp_api_spec_dir,
debug=True)
options = {"validate_responses": True}
app.add_api('swagger_simple.yaml', validate_responses=True, pass_context_arg_name='request_ctx', options=options)
return app
@asyncio.coroutine
def test_app(aiohttp_app, aiohttp_client):
app_client = yield from aiohttp_client(aiohttp_app.app)
get_bye = yield from app_client.get('/v1.0/bye/jsantos')
assert get_bye.status == 200
assert (yield from get_bye.read()) == b'Goodbye jsantos'
@asyncio.coroutine
def test_app_with_relative_path(aiohttp_api_spec_dir, aiohttp_client):
app = AioHttpApp(__name__, port=5001,
specification_dir='..' /
aiohttp_api_spec_dir.relative_to(TEST_FOLDER),
debug=True)
app.add_api('swagger_simple.yaml')
app_client = yield from aiohttp_client(app.app)
get_bye = yield from app_client.get('/v1.0/bye/jsantos')
assert get_bye.status == 200
assert (yield from get_bye.read()) == b'Goodbye jsantos'
@asyncio.coroutine
def test_swagger_json(aiohttp_api_spec_dir, aiohttp_client):
app = AioHttpApp(__name__, port=5001,
specification_dir=aiohttp_api_spec_dir,
debug=True)
api = app.add_api('swagger_simple.yaml')
app_client = yield from aiohttp_client(app.app)
swagger_json = yield from app_client.get('/v1.0/swagger.json')
json_ = yield from swagger_json.read()
assert swagger_json.status == 200
assert api.specification.raw == json.loads(json_)
@asyncio.coroutine
def test_swagger_yaml(aiohttp_api_spec_dir, aiohttp_client):
app = AioHttpApp(__name__, port=5001,
specification_dir=aiohttp_api_spec_dir,
debug=True)
api = app.add_api('swagger_simple.yaml')
app_client = yield from aiohttp_client(app.app)
spec_response = yield from app_client.get('/v1.0/swagger.yaml')
data_ = yield from spec_response.read()
assert spec_response.status == 200
assert api.specification.raw == yaml.load(data_)
@asyncio.coroutine
def test_no_swagger_json(aiohttp_api_spec_dir, aiohttp_client):
options = {"swagger_json": False}
app = AioHttpApp(__name__, port=5001,
specification_dir=aiohttp_api_spec_dir,
options=options,
debug=True)
app.add_api('swagger_simple.yaml')
app_client = yield from aiohttp_client(app.app)
swagger_json = yield from app_client.get('/v1.0/swagger.json')
assert swagger_json.status == 404
@asyncio.coroutine
def test_no_swagger_yaml(aiohttp_api_spec_dir, aiohttp_client):
options = {"swagger_json": False}
app = AioHttpApp(__name__, port=5001,
specification_dir=aiohttp_api_spec_dir,
options=options,
debug=True)
app.add_api('swagger_simple.yaml')
app_client = yield from aiohttp_client(app.app)
spec_response = yield from app_client.get('/v1.0/swagger.yaml')
assert spec_response.status == 404
@asyncio.coroutine
def test_swagger_ui(aiohttp_api_spec_dir, aiohttp_client):
app = AioHttpApp(__name__, port=5001,
specification_dir=aiohttp_api_spec_dir,
debug=True)
app.add_api('swagger_simple.yaml')
app_client = yield from aiohttp_client(app.app)
swagger_ui = yield from app_client.get('/v1.0/ui')
assert swagger_ui.status == 200
assert swagger_ui.url.path == '/v1.0/ui/'
assert b'url = "/v1.0/swagger.json"' in (yield from swagger_ui.read())
swagger_ui = yield from app_client.get('/v1.0/ui/')
assert swagger_ui.status == 200
assert b'url = "/v1.0/swagger.json"' in (yield from swagger_ui.read())
@asyncio.coroutine
def test_swagger_ui_index(aiohttp_api_spec_dir, aiohttp_client):
app = AioHttpApp(__name__, port=5001,
specification_dir=aiohttp_api_spec_dir,
debug=True)
app.add_api('swagger_simple.yaml')
app_client = yield from aiohttp_client(app.app)
swagger_ui = yield from app_client.get('/v1.0/ui/index.html')
assert swagger_ui.status == 200
assert b'url = "/v1.0/swagger.json"' in (yield from swagger_ui.read())
@asyncio.coroutine
def test_swagger_ui_static(aiohttp_api_spec_dir, aiohttp_client):
app = AioHttpApp(__name__, port=5001,
specification_dir=aiohttp_api_spec_dir,
debug=True)
app.add_api('swagger_simple.yaml')
app_client = yield from aiohttp_client(app.app)
swagger_ui = yield from app_client.get('/v1.0/ui/lib/swagger-oauth.js')
assert swagger_ui.status == 200
app_client = yield from aiohttp_client(app.app)
swagger_ui = yield from app_client.get('/v1.0/ui/swagger-ui.min.js')
assert swagger_ui.status == 200
@asyncio.coroutine
def test_no_swagger_ui(aiohttp_api_spec_dir, aiohttp_client):
options = {"swagger_ui": False}
app = AioHttpApp(__name__, port=5001,
specification_dir=aiohttp_api_spec_dir,
options=options, debug=True)
app.add_api('swagger_simple.yaml')
app_client = yield from aiohttp_client(app.app)
swagger_ui = yield from app_client.get('/v1.0/ui/')
assert swagger_ui.status == 404
app2 = AioHttpApp(__name__, port=5001,
specification_dir=aiohttp_api_spec_dir,
debug=True)
options = {"swagger_ui": False}
app2.add_api('swagger_simple.yaml', options=options)
app2_client = yield from aiohttp_client(app.app)
swagger_ui2 = yield from app2_client.get('/v1.0/ui/')
assert swagger_ui2.status == 404
@asyncio.coroutine
def test_middlewares(aiohttp_api_spec_dir, aiohttp_client):
@asyncio.coroutine
def middleware(app, handler):
@asyncio.coroutine
def middleware_handler(request):
response = (yield from handler(request))
response.body += b' middleware'
return response
return middleware_handler
options = {"middlewares": [middleware]}
app = AioHttpApp(__name__, port=5001,
specification_dir=aiohttp_api_spec_dir,
debug=True, options=options)
app.add_api('swagger_simple.yaml')
app_client = yield from aiohttp_client(app.app)
get_bye = yield from app_client.get('/v1.0/bye/jsantos')
assert get_bye.status == 200
assert (yield from get_bye.read()) == b'Goodbye jsantos middleware'
@asyncio.coroutine
def test_response_with_str_body(aiohttp_app, aiohttp_client):
app_client = yield from aiohttp_client(aiohttp_app.app)
get_bye = yield from app_client.get('/v1.0/aiohttp_str_response')
assert get_bye.status == 200
assert (yield from get_bye.read()) == b'str response'
@asyncio.coroutine
def test_response_with_non_str_and_non_json_body(aiohttp_app, aiohttp_client):
app_client = yield from aiohttp_client(aiohttp_app.app)
get_bye = yield from app_client.get(
'/v1.0/aiohttp_non_str_non_json_response'
)
assert get_bye.status == 200
assert (yield from get_bye.read()) == b'1234'
@asyncio.coroutine
def test_response_with_bytes_body(aiohttp_app, aiohttp_client):
app_client = yield from aiohttp_client(aiohttp_app.app)
get_bye = yield from app_client.get('/v1.0/aiohttp_bytes_response')
assert get_bye.status == 200
assert (yield from get_bye.read()) == b'bytes response'
@asyncio.coroutine
def test_validate_responses(aiohttp_app, aiohttp_client):
app_client = yield from aiohttp_client(aiohttp_app.app)
get_bye = yield from app_client.get('/v1.0/aiohttp_validate_responses')
assert get_bye.status == 200
assert (yield from get_bye.read()) == b'{"validate": true}'
@asyncio.coroutine
def test_get_users(aiohttp_client, aiohttp_app):
app_client = yield from aiohttp_client(aiohttp_app.app)
resp = yield from app_client.get('/v1.0/users')
assert resp.url.path == '/v1.0/users/'
assert resp.status == 200
json_data = yield from resp.json()
assert json_data == \
[{'name': 'John Doe', 'id': 1}, {'name': 'Nick Carlson', 'id': 2}]
@asyncio.coroutine
def test_create_user(aiohttp_client, aiohttp_app):
app_client = yield from aiohttp_client(aiohttp_app.app)
user = {'name': 'Maksim'}
resp = yield from app_client.post('/v1.0/users', json=user, headers={'Content-type': 'application/json'})
assert resp.status == 201
@asyncio.coroutine
def test_access_request_context(aiohttp_client, aiohttp_app):
app_client = yield from aiohttp_client(aiohttp_app.app)
resp = yield from app_client.post('/v1.0/aiohttp_access_request_context/')
assert resp.status == 204
@asyncio.coroutine
def test_query_parsing_simple(aiohttp_client, aiohttp_app):
expected_query = 'query'
app_client = yield from aiohttp_client(aiohttp_app.app)
resp = yield from app_client.get(
'/v1.0/aiohttp_query_parsing_str',
params={
'query': expected_query,
},
)
assert resp.status == 200
json_data = yield from resp.json()
assert json_data == {'query': expected_query}
@asyncio.coroutine
def test_query_parsing_array(aiohttp_client, aiohttp_app):
expected_query = ['queryA', 'queryB']
app_client = yield from aiohttp_client(aiohttp_app.app)
resp = yield from app_client.get(
'/v1.0/aiohttp_query_parsing_array',
params={
'query': ','.join(expected_query),
},
)
assert resp.status == 200
json_data = yield from resp.json()
assert json_data == {'query': expected_query}
@asyncio.coroutine
def test_query_parsing_array_multi(aiohttp_client, aiohttp_app):
expected_query = ['queryA', 'queryB', 'queryC']
query_str = '&'.join(['query=%s' % q for q in expected_query])
app_client = yield from aiohttp_client(aiohttp_app.app)
resp = yield from app_client.get(
'/v1.0/aiohttp_query_parsing_array_multi?%s' % query_str,
)
assert resp.status == 200
json_data = yield from resp.json()
assert json_data == {'query': expected_query}
if sys.version_info[0:2] >= (3, 5):
@pytest.fixture
def aiohttp_app_async_def(aiohttp_api_spec_dir):
app = AioHttpApp(__name__, port=5001,
specification_dir=aiohttp_api_spec_dir,
debug=True)
app.add_api('swagger_simple_async_def.yaml', validate_responses=True)
return app
@asyncio.coroutine
def test_validate_responses_async_def(aiohttp_app_async_def, aiohttp_client):
app_client = yield from aiohttp_client(aiohttp_app_async_def.app)
get_bye = yield from app_client.get('/v1.0/aiohttp_validate_responses')
assert get_bye.status == 200
assert (yield from get_bye.read()) == b'{"validate": true}'
| true | true |
f73e3d4c3bf3b19dad150fc4a8d5991cea32220b | 1,478 | py | Python | hohgwuhn/geovis.py | adyates/ksw-school-scrape | b2d71fee9bca4e0bfd5303c3b2b24583afc0964a | [
"Apache-2.0"
] | null | null | null | hohgwuhn/geovis.py | adyates/ksw-school-scrape | b2d71fee9bca4e0bfd5303c3b2b24583afc0964a | [
"Apache-2.0"
] | 4 | 2015-07-04T12:02:19.000Z | 2019-04-28T18:09:29.000Z | hohgwuhn/geovis.py | adyates/ksw-school-scrape | b2d71fee9bca4e0bfd5303c3b2b24583afc0964a | [
"Apache-2.0"
] | null | null | null | import csv
from urllib.parse import quote
import webbrowser
from . import geocoder_googs as geocoder
GOOGLE_STATIC_MAPS_ENDPOINT = (
'https://maps.googleapis.com/maps/api/staticmap?size=1280x720&markers=')
# Compute the max number of markers I can safely add before hitting the Static Map API char limit.
# String of addition at the end is composed of the following:
# Length of urlencoded delimiters: comma (lat/lon) and pipe (marker points)
# 2 numbers per point consisting of:
# 1 - Sign character
# 3 - Max number of digits used by integer part
# 1 - Decimal
# 7 - Max number of digits used by fractional part (Est. based on points used)
MAX_EST_MARKER_COUNT = (2048 - len(GOOGLE_STATIC_MAPS_ENDPOINT)) / (
len(quote(',|')) + 2 * (1 + 3 + 1 + 7))
def exportMapsUrls():
    """Open browser tabs with Google Static Map URLs for every school point.

    Reads the geocoded school CSV, chunks the lat/lon markers so each URL
    stays under the Static Maps length limit, and opens one sanity-check
    map per chunk in the default web browser.
    """
    marker_data = [[]]  # list of marker chunks, each within the URL budget
    # Bug fix: csv.DictReader requires a text-mode file in Python 3 -- 'rb'
    # feeds it bytes and fails. newline='' is the documented mode for csv.
    with open(geocoder.SCHOOL_GEODATA_FILE, newline='') as csv_in:
        school_data = csv.DictReader(csv_in)
        for school in school_data:
            if len(marker_data[-1]) >= MAX_EST_MARKER_COUNT:
                marker_data.append([])
            marker_data[-1].append('%s,%s' % (school['Latitude'], school['Longitude']))
    for marker_list in marker_data:
        map_url = GOOGLE_STATIC_MAPS_ENDPOINT + '|'.join(marker_list)
        # Verify they will load in a pretty way
        webbrowser.open_new_tab(map_url)
if __name__ == '__main__':
    exportMapsUrls()
| 36.04878 | 98 | 0.67862 | import csv
from urllib.parse import quote
import webbrowser
from . import geocoder_googs as geocoder
GOOGLE_STATIC_MAPS_ENDPOINT = (
'https://maps.googleapis.com/maps/api/staticmap?size=1280x720&markers=')
MAX_EST_MARKER_COUNT = (2048 - len(GOOGLE_STATIC_MAPS_ENDPOINT)) / (
len(quote(',|')) + 2 * (1 + 3 + 1 + 7))
def exportMapsUrls():
marker_data = [[]]
with open(geocoder.SCHOOL_GEODATA_FILE, 'rb') as csv_in:
school_data = csv.DictReader(csv_in)
for school in school_data:
if len(marker_data[-1]) >= MAX_EST_MARKER_COUNT:
marker_data.append([])
marker_data[-1].append('%s,%s' % (school['Latitude'], school['Longitude']))
for marker_list in marker_data:
map_url = GOOGLE_STATIC_MAPS_ENDPOINT + '|'.join(marker_list)
webbrowser.open_new_tab(map_url)
if __name__ == '__main__':
exportMapsUrls()
| true | true |
f73e3dde603d4288c31f41b55ef2f343eacaab9e | 1,298 | py | Python | hangman/lives_visual.py | Dafov/python-projects | 4cec3621effe3a24f677b73fc009680292640b32 | [
"MIT"
] | null | null | null | hangman/lives_visual.py | Dafov/python-projects | 4cec3621effe3a24f677b73fc009680292640b32 | [
"MIT"
] | null | null | null | hangman/lives_visual.py | Dafov/python-projects | 4cec3621effe3a24f677b73fc009680292640b32 | [
"MIT"
] | null | null | null | lives_visual_dict = {
0: """
___________
| / |
|/ ( )
| /|\\
| / \\
|
""",
1: """
___________
| / |
|/ ( )
| /|\\
| /
|
""",
2: """
___________
| / |
|/ ( )
| /|\\
|
|
""",
3: """
___________
| / |
|/ ( )
|
|
|
""",
4: """
___________
| / |
|/
|
|
|
""",
5: """
___________
| /
|/
|
|
|
""",
6: """
|
|
|
|
|
""",
7: "",
}
| 22 | 29 | 0.070108 | lives_visual_dict = {
0: """
___________
| / |
|/ ( )
| /|\\
| / \\
|
""",
1: """
___________
| / |
|/ ( )
| /|\\
| /
|
""",
2: """
___________
| / |
|/ ( )
| /|\\
|
|
""",
3: """
___________
| / |
|/ ( )
|
|
|
""",
4: """
___________
| / |
|/
|
|
|
""",
5: """
___________
| /
|/
|
|
|
""",
6: """
|
|
|
|
|
""",
7: "",
}
| true | true |
f73e3efe93b2b474b69960e7dafeb5f3b83d2f3a | 3,767 | py | Python | staging/stop_detection/stops.py | endremborza/data-bevy | 25398124595ffddc201de6a748e84bb24d5885b2 | [
"MIT"
] | null | null | null | staging/stop_detection/stops.py | endremborza/data-bevy | 25398124595ffddc201de6a748e84bb24d5885b2 | [
"MIT"
] | null | null | null | staging/stop_detection/stops.py | endremborza/data-bevy | 25398124595ffddc201de6a748e84bb24d5885b2 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from datetime import datetime
import datazimmer as dz
import pandas as pd
from colassigner import ColAssigner, get_all_cols
class NoStops(Exception):
    """Raised when the stop-detection model finds no stop events in a trace."""
    pass
@dataclass
class DaySetup:
    """Hour-of-day boundaries (0-23) splitting a day into work and home windows."""
    work_start: int   # first hour counted as work time
    work_end: int     # last hour counted as work time
    home_arrive: int  # hour from which the evening counts as home time
    home_depart: int  # hour until which the morning counts as home time
class Coordinates(dz.CompositeTypeBase):
    """Geographic point; presumably expands to lat/lon float columns (see usage
    of ``PingFeatures.loc.lat`` etc. below)."""
    lat = float
    lon = float
class Interval(dz.CompositeTypeBase):
    """Time span with inclusive start/end timestamps."""
    start = datetime
    end = datetime
class PingFeatures(dz.TableFeaturesBase):
    """Schema of the raw location-ping table."""
    loc = Coordinates
    datetime = datetime
    device_id = str
class StopFeatures(dz.TableFeaturesBase):
    """Schema of the detected-stop table produced by ``proc_device_pings``."""
    device_id = str
    destination_label = str
    stay_number = int
    n_events = int
    interval = Interval
    center = Coordinates
    is_home = bool
    is_work = bool
    info = str
class Labeler(ColAssigner):
    """Assigns stop-detection helper columns to a ping DataFrame.

    NOTE(review): with ColAssigner each method appears to define a derived
    column named after the method (methods reference each other through
    ``Labeler.<name>`` as column keys) -- confirm against colassigner docs.
    """
    def __init__(self, model, day: DaySetup) -> None:
        # model: stop-detection clusterer exposing fit_predict (e.g. Infostop)
        self.model = model
        self.day = day
    def ts(self, df):
        # Epoch seconds: datetime64[ns] viewed as int gives nanoseconds.
        return df[PingFeatures.datetime].view(int) / 10**9
    def hour(self, df):
        # Hour of day (0-23) of each ping.
        return df[PingFeatures.datetime].dt.hour
    def destination_label(self, df):
        # Cluster every ping on (lat, lon, epoch-seconds); labels kept as str.
        arr = df.loc[:, [PingFeatures.loc.lat, PingFeatures.loc.lon, Labeler.ts]].values
        try:
            return self.model.fit_predict(arr).astype(str)
        except Exception as e:
            # NOTE(review): only the "no stop events" failure is expected here;
            # any other error surfaces as an AssertionError instead of NoStops.
            assert "No stop events found" in str(e)
            raise NoStops("hopefully")
    def stay_number(self, df):
        # Consecutive-run id: increments whenever the label changes between rows.
        return (
            df[Labeler.destination_label] != df[Labeler.destination_label].shift(1)
        ).cumsum()
    def is_worktime(self, df):
        # Ping falls inside the configured work window (same-day interval).
        return (df[Labeler.hour] >= self.day.work_start) & (
            df[Labeler.hour] <= self.day.work_end
        )
    def is_hometime(self, df):
        # Home window wraps midnight, hence "or" instead of "and".
        return (df[Labeler.hour] >= self.day.home_arrive) | (
            df[Labeler.hour] <= self.day.home_depart
        )
def proc_device_pings(ping_df, model, day: DaySetup):
    """Turn one device's raw pings into a table of detected stops.

    Pings are ordered chronologically, enriched with stop-detection
    columns by ``Labeler``, then aggregated into one row per stay.
    """
    chronological = ping_df.sort_values(PingFeatures.datetime)
    labeled = Labeler(model, day)(chronological)
    return _gb_stop(labeled)
def _gb_stop(labeled_df):
    """Collapse labelled pings into one row per (stay_number, label) stop.

    Aggregates event counts, interval bounds, mean center coordinates and
    the fraction of pings falling in home/work hours, then derives the
    is_home/is_work flags and trims to the StopFeatures schema.
    """
    dt_col = PingFeatures.datetime
    return (
        labeled_df.groupby([Labeler.stay_number, Labeler.destination_label])
        .agg(
            **{
                StopFeatures.n_events: pd.NamedAgg(dt_col, "count"),
                StopFeatures.interval.start: pd.NamedAgg(dt_col, "first"),
                StopFeatures.interval.end: pd.NamedAgg(dt_col, "last"),
                StopFeatures.center.lon: pd.NamedAgg(PingFeatures.loc.lon, "mean"),
                StopFeatures.center.lat: pd.NamedAgg(PingFeatures.loc.lat, "mean"),
                "home_rate": pd.NamedAgg(Labeler.is_hometime, "mean"),
                "work_rate": pd.NamedAgg(Labeler.is_worktime, "mean"),
            }
        )
        .reset_index()
        # dur: stay length in seconds, zeroed out for the "-1" noise label so
        # noise stays can never win the per-week home/work maximisation.
        .assign(
            **{
                "dur": lambda df: (
                    df[StopFeatures.interval.end] - df[StopFeatures.interval.start]
                ).dt.total_seconds()
                * (df[StopFeatures.destination_label] != "-1"),
                StopFeatures.is_work: lambda df: _is_maxw(df, "work_rate"),
                StopFeatures.is_home: lambda df: _is_maxw(df, "home_rate"),
                # info / device_id are placeholders here; presumably filled in
                # by the caller downstream -- TODO confirm.
                StopFeatures.info: "N/A",
                StopFeatures.device_id: "0",
            }
        )
        .loc[:, get_all_cols(StopFeatures)]
    )
def _is_maxw(df, rate_col):
    """Flag, per ISO week, the destination with the largest rate-weighted duration.

    For each (week, destination) the score is sum(dur * rate); a stop row is
    flagged True when its destination holds the weekly maximum. Returns a
    boolean array aligned with ``df``'s rows (via reindex on the group keys).
    """
    gb_cols = ["_week", StopFeatures.destination_label]
    wdf = df.assign(
        _week=df[StopFeatures.interval.start].dt.isocalendar().week,
        target=df["dur"] * df[rate_col],
    )
    wsums = wdf.groupby(gb_cols)["target"].sum()
    # Broadcast each week's maximum back onto its (week, destination) groups.
    wmaxs = wsums.groupby("_week").transform("max")
    return (wsums == wmaxs).reindex(wdf[gb_cols]).values
| 28.323308 | 88 | 0.608707 | from dataclasses import dataclass
from datetime import datetime
import datazimmer as dz
import pandas as pd
from colassigner import ColAssigner, get_all_cols
class NoStops(Exception):
pass
@dataclass
class DaySetup:
work_start: int
work_end: int
home_arrive: int
home_depart: int
class Coordinates(dz.CompositeTypeBase):
lat = float
lon = float
class Interval(dz.CompositeTypeBase):
start = datetime
end = datetime
class PingFeatures(dz.TableFeaturesBase):
loc = Coordinates
datetime = datetime
device_id = str
class StopFeatures(dz.TableFeaturesBase):
device_id = str
destination_label = str
stay_number = int
n_events = int
interval = Interval
center = Coordinates
is_home = bool
is_work = bool
info = str
class Labeler(ColAssigner):
def __init__(self, model, day: DaySetup) -> None:
self.model = model
self.day = day
def ts(self, df):
return df[PingFeatures.datetime].view(int) / 10**9
def hour(self, df):
return df[PingFeatures.datetime].dt.hour
def destination_label(self, df):
arr = df.loc[:, [PingFeatures.loc.lat, PingFeatures.loc.lon, Labeler.ts]].values
try:
return self.model.fit_predict(arr).astype(str)
except Exception as e:
assert "No stop events found" in str(e)
raise NoStops("hopefully")
def stay_number(self, df):
return (
df[Labeler.destination_label] != df[Labeler.destination_label].shift(1)
).cumsum()
def is_worktime(self, df):
return (df[Labeler.hour] >= self.day.work_start) & (
df[Labeler.hour] <= self.day.work_end
)
def is_hometime(self, df):
return (df[Labeler.hour] >= self.day.home_arrive) | (
df[Labeler.hour] <= self.day.home_depart
)
def proc_device_pings(ping_df, model, day: DaySetup):
return (
ping_df.sort_values(PingFeatures.datetime)
.pipe(Labeler(model, day))
.pipe(_gb_stop)
)
def _gb_stop(labeled_df):
dt_col = PingFeatures.datetime
return (
labeled_df.groupby([Labeler.stay_number, Labeler.destination_label])
.agg(
**{
StopFeatures.n_events: pd.NamedAgg(dt_col, "count"),
StopFeatures.interval.start: pd.NamedAgg(dt_col, "first"),
StopFeatures.interval.end: pd.NamedAgg(dt_col, "last"),
StopFeatures.center.lon: pd.NamedAgg(PingFeatures.loc.lon, "mean"),
StopFeatures.center.lat: pd.NamedAgg(PingFeatures.loc.lat, "mean"),
"home_rate": pd.NamedAgg(Labeler.is_hometime, "mean"),
"work_rate": pd.NamedAgg(Labeler.is_worktime, "mean"),
}
)
.reset_index()
.assign(
**{
"dur": lambda df: (
df[StopFeatures.interval.end] - df[StopFeatures.interval.start]
).dt.total_seconds()
* (df[StopFeatures.destination_label] != "-1"),
StopFeatures.is_work: lambda df: _is_maxw(df, "work_rate"),
StopFeatures.is_home: lambda df: _is_maxw(df, "home_rate"),
StopFeatures.info: "N/A",
StopFeatures.device_id: "0",
}
)
.loc[:, get_all_cols(StopFeatures)]
)
def _is_maxw(df, rate_col):
gb_cols = ["_week", StopFeatures.destination_label]
wdf = df.assign(
_week=df[StopFeatures.interval.start].dt.isocalendar().week,
target=df["dur"] * df[rate_col],
)
wsums = wdf.groupby(gb_cols)["target"].sum()
wmaxs = wsums.groupby("_week").transform("max")
return (wsums == wmaxs).reindex(wdf[gb_cols]).values
| true | true |
f73e3f17705797fd5d8b0edb10e010668fc62b78 | 1,013 | py | Python | modeltranslation_rosetta/utils/response.py | Apkawa/django-modeltranslation-rosetta | 568354ceee201f891e1f9f6d1f5987dbdfa8f84a | [
"MIT"
] | null | null | null | modeltranslation_rosetta/utils/response.py | Apkawa/django-modeltranslation-rosetta | 568354ceee201f891e1f9f6d1f5987dbdfa8f84a | [
"MIT"
] | 14 | 2020-01-06T16:18:37.000Z | 2022-01-20T19:40:56.000Z | modeltranslation_rosetta/utils/response.py | Apkawa/django-modeltranslation-rosetta | 568354ceee201f891e1f9f6d1f5987dbdfa8f84a | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import unicode_literals
from django.http import HttpResponse
from django.utils.encoding import smart_str, smart_bytes
class FileResponse(HttpResponse):
    """
    HttpResponse that serves raw content as a (by default downloadable)
    file.

    Arguments:
    - file_content. The raw response body.
    - file_name (string). The file name suggested to the browser.
    - download (bool). When True (default) the Content-Disposition is
      marked ``attachment`` so browsers save instead of displaying it.
    - content_type (string). Defaults to ``application/octet-stream``.
    """
    def __init__(self, file_content, file_name, download=True, content_type=None, *args, **kwargs):
        disposition = 'filename="{}"'.format(smart_str(file_name))
        if download:
            disposition = 'attachment; ' + disposition
        headers = {
            'Content-Disposition': smart_bytes(disposition),
            'Content-Length': len(file_content),
        }
        super(FileResponse, self).__init__(
            file_content,
            content_type=content_type or 'application/octet-stream',
            *args,
            **kwargs
        )
        # headers are applied after init since HttpResponse exposes them
        # via item assignment on the response object
        for h, v in headers.items():
            self[h] = v
| 28.942857 | 99 | 0.616979 |
from __future__ import unicode_literals
from django.http import HttpResponse
from django.utils.encoding import smart_str, smart_bytes
class FileResponse(HttpResponse):
def __init__(self, file_content, file_name, download=True, content_type=None, *args, **kwargs):
disposition = 'filename="{}"'.format(smart_str(file_name))
if download:
disposition = 'attachment; ' + disposition
headers = {
'Content-Disposition': smart_bytes(disposition),
'Content-Length': len(file_content),
}
super(FileResponse, self).__init__(
file_content,
content_type=content_type or 'application/octet-stream',
*args,
**kwargs
)
for h, v in headers.items():
self[h] = v
| true | true |
f73e3f52b563db31e518ebacc55f9d01882d8afa | 629 | py | Python | manage.py | kamransadixov/fileshare | 42316012ed5d022fcb1d53a69e304e1c2e60f3ee | [
"MIT"
] | null | null | null | manage.py | kamransadixov/fileshare | 42316012ed5d022fcb1d53a69e304e1c2e60f3ee | [
"MIT"
] | 7 | 2021-03-19T00:32:54.000Z | 2022-01-13T02:19:29.000Z | manage.py | kamransadixov/fileshare | 42316012ed5d022fcb1d53a69e304e1c2e60f3ee | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks from the command line."""
    # default settings module; an explicitly exported env var wins
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fileshare.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # re-raise with a friendlier hint, chaining the original error
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| 28.590909 | 73 | 0.683625 |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fileshare.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
f73e3f757d0c78c9954e9b52ef68c58071f5b655 | 136,909 | py | Python | peeringdb_server/management/commands/pdb_api_test.py | egfrank/peeringdb | 5ccb44c2955e29e9d9217f1a984dcb90a661ec62 | [
"BSD-2-Clause"
] | 1 | 2021-01-29T04:08:52.000Z | 2021-01-29T04:08:52.000Z | peeringdb_server/management/commands/pdb_api_test.py | egfrank/peeringdb | 5ccb44c2955e29e9d9217f1a984dcb90a661ec62 | [
"BSD-2-Clause"
] | 7 | 2021-04-06T18:42:16.000Z | 2021-09-08T03:01:50.000Z | peeringdb_server/management/commands/pdb_api_test.py | egfrank/peeringdb | 5ccb44c2955e29e9d9217f1a984dcb90a661ec62 | [
"BSD-2-Clause"
] | null | null | null | #!/bin/env python
"""
series of integration/unit tests for the pdb api
"""
import pytest
import copy
import unittest
import uuid
import random
import re
import time
import datetime
import json
from twentyc.rpc import (
RestClient,
PermissionDeniedException,
InvalidRequestException,
NotFoundException,
)
from grainy.const import (
PERM_READ,
PERM_UPDATE,
PERM_CREATE,
PERM_DELETE,
)
from django.core.management.base import BaseCommand
from django.contrib.auth.models import Group
from django.conf import settings
from django.db.utils import IntegrityError
from rest_framework import serializers
from rest_framework.test import APIRequestFactory
from peeringdb_server.models import (
REFTAG_MAP,
QUEUE_ENABLED,
User,
Organization,
Network,
InternetExchange,
Facility,
NetworkContact,
NetworkIXLan,
NetworkFacility,
IXLan,
IXLanPrefix,
InternetExchangeFacility,
DeskProTicket,
)
from peeringdb_server.serializers import REFTAG_MAP as REFTAG_MAP_SLZ
from peeringdb_server import inet, settings as pdb_settings
from peeringdb_server.rest import NetworkViewSet
START_TIMESTAMP = time.time()
SHARED = {}
NUMERIC_TESTS = {
"lt": "Less",
"lte": "LessEqual",
"gt": "Greater",
"gte": "GreaterEqual",
"": "Equal",
}
DATETIME = datetime.datetime.now()
DATE = DATETIME.date()
DATE_YDAY = DATE - datetime.timedelta(days=1)
DATE_TMRW = DATE - datetime.timedelta(days=-1)
DATES = {
"today": (DATE, DATE.strftime("%Y-%m-%d")),
"yesterday": (DATE_YDAY, DATE_YDAY.strftime("%Y-%m-%d")),
"tomorrow": (DATE_TMRW, DATE_TMRW.strftime("%Y-%m-%d")),
}
# entity names
ORG_RW = "API Test Organization RW"
ORG_RW_PENDING = "%s:Pending" % ORG_RW
ORG_R = "API Test Organization R"
NET_R = "%s:Network" % ORG_R
NET_R_PENDING = "%s:Pending" % NET_R
NET_R_DELETED = "%s:Deleted" % NET_R
IX_R = "%s:Exchange" % ORG_R
FAC_R = "%s:Facility" % ORG_R
# user specs
USER = {"user": "api_test", "password": "89c8ec05-b897"}
USER_ORG_ADMIN = {"user": "api_test_org_admin", "password": "89c8ec05-b897"}
USER_ORG_MEMBER = {"user": "api_test_org_member", "password": "89c8ec05-b897"}
USER_CRUD = {
"delete": {"user": "api_test_crud_delete", "password": "89c8ec05-b897"},
"update": {"user": "api_test_crud_update", "password": "89c8ec05-b897"},
"create": {"user": "api_test_crud_create", "password": "89c8ec05-b897"},
}
# server location
URL = settings.API_URL
# common
CITY = "Chicago"
COUNTRY = "US"
CONTINENT = "North America"
PHONE = "+12065550199"
WEBSITE = "http://www.test.apitest"
STATE = "IL"
ZIPCODE = "1-2345"
NOTE = "This is a test entry made by a script to test out the API"
EMAIL = "test@20c.com"
VERBOSE = False
PREFIXES_V4 = [
"206.223.114.0/24",
"206.223.115.0/24",
"206.223.116.0/24",
"206.223.117.0/24",
"206.223.118.0/24",
"206.223.119.0/24",
"206.223.120.0/24",
"206.223.121.0/24",
"206.223.122.0/24",
]
PREFIXES_V6 = [
"2001:504:0:1::/64",
"2001:504:0:2::/64",
"2001:504:0:3::/64",
"2001:504:0:4::/64",
"2001:504:0:5::/64",
"2001:504:0:6::/64",
"2001:504:0:7::/64",
"2001:504:0:8::/64",
"2001:504:0:9::/64",
]
class TestJSON(unittest.TestCase):
rest_client = RestClient
PREFIX_COUNT = 110
IP4_COUNT = 1
IP6_COUNT = 1
@classmethod
def get_ip6(cls, ixlan):
hosts = []
for host in (
ixlan.ixpfx_set.filter(status=ixlan.status, protocol=6)
.first()
.prefix.hosts()
):
if len(hosts) < 100:
hosts.append(host)
else:
break
r = "{}".format(hosts[cls.IP6_COUNT])
cls.IP6_COUNT += 1
return r
@classmethod
def get_ip4(cls, ixlan):
hosts = []
for host in (
ixlan.ixpfx_set.filter(status=ixlan.status, protocol=4)
.first()
.prefix.hosts()
):
if len(hosts) < 100:
hosts.append(host)
else:
break
r = "{}".format(hosts[cls.IP4_COUNT])
cls.IP4_COUNT += 1
return r
@classmethod
def get_prefix4(cls):
r = f"206.41.{cls.PREFIX_COUNT}.0/24"
cls.PREFIX_COUNT += 1
return r
@classmethod
def get_prefix6(cls):
r = f"2001:504:41:{cls.PREFIX_COUNT}::/64"
cls.PREFIX_COUNT += 1
return r
    def setUp(self):
        # API clients for each permission level exercised by the tests
        self.db_guest = self.rest_client(URL, verbose=VERBOSE)
        self.db_user = self.rest_client(URL, verbose=VERBOSE, **USER)
        self.db_org_member = self.rest_client(URL, verbose=VERBOSE, **USER_ORG_MEMBER)
        self.db_org_admin = self.rest_client(URL, verbose=VERBOSE, **USER_ORG_ADMIN)
        self.user_org_admin = User.objects.get(username="api_test_org_admin")
        self.user_org_member = User.objects.get(username="api_test_org_member")
        # one client per CRUD permission flavor: db_crud_create,
        # db_crud_update, db_crud_delete
        for p, specs in list(USER_CRUD.items()):
            setattr(
                self, "db_crud_%s" % p, self.rest_client(URL, verbose=VERBOSE, **specs)
            )
def all_dbs(self, exclude=[]):
return [
db
for db in [
self.db_guest,
self.db_org_member,
self.db_user,
self.db_org_admin,
self.db_crud_create,
self.db_crud_delete,
self.db_crud_update,
]
if db not in exclude
]
def readonly_dbs(self, exclude=[]):
return [
db
for db in [self.db_guest, self.db_org_member, self.db_user]
if db not in exclude
]
##########################################################################
@classmethod
def make_data_org(self, **kwargs):
data = {
"name": self.make_name("Test"),
"website": WEBSITE,
"notes": NOTE,
"address1": "address",
"address2": "address",
"city": CITY,
"country": COUNTRY,
"state": "state",
"zipcode": "12345",
}
data.update(**kwargs)
return data
##########################################################################
    @classmethod
    def make_data_ix(self, **kwargs):
        """Build a valid exchange payload owned by the shared RW org;
        kwargs override the defaults."""
        data = {
            "name": self.make_name("Test"),
            "org_id": SHARED["org_rw_ok"].id,
            "name_long": self.make_name("Test Long Name"),
            "city": CITY,
            "country": COUNTRY,
            "region_continent": CONTINENT,
            "media": "Ethernet",
            "notes": NOTE,
            "proto_unicast": True,
            "proto_multicast": False,
            "proto_ipv6": True,
            "website": WEBSITE,
            "url_stats": "%s/stats" % WEBSITE,
            "tech_email": EMAIL,
            "tech_phone": PHONE,
            "policy_email": EMAIL,
            "policy_phone": PHONE,
        }
        data.update(**kwargs)
        return data
##########################################################################
    @classmethod
    def make_data_fac(self, **kwargs):
        """Build a valid facility payload owned by the shared RW org;
        kwargs override the defaults."""
        data = {
            "name": self.make_name("Test"),
            "org_id": SHARED["org_rw_ok"].id,
            "website": WEBSITE,
            "city": CITY,
            "zipcode": ZIPCODE,
            "address1": "Some street",
            # clli codes must be unique, so derive one from a uuid
            "clli": str(uuid.uuid4())[:6].upper(),
            "rencode": "",
            "npanxx": "000-111",
            "latitude": None,
            "longitude": None,
            "notes": NOTE,
            "country": COUNTRY,
            "tech_email": EMAIL,
            "tech_phone": PHONE,
            "sales_email": EMAIL,
            "sales_phone": PHONE,
        }
        data.update(**kwargs)
        return data
##########################################################################
    @classmethod
    def make_data_net(self, **kwargs):
        """Build a valid network payload with a unique ASN; kwargs
        override the defaults."""
        # allocate the next ASN at or above 90000000 so test networks
        # never collide with existing data
        try:
            asn = Network.objects.order_by("-asn").first().asn
        except AttributeError:
            # no networks exist yet (first() returned None)
            asn = 90000000
        if asn < 90000000:
            asn = 90000000
        else:
            asn = asn + 1
        data = {
            "name": self.make_name("Test"),
            "org_id": SHARED["org_rw_ok"].id,
            "aka": self.make_name("Also known as"),
            "asn": asn,
            "website": WEBSITE,
            "irr_as_set": "AS-ZZ-ZZZZZZ@RIPE",
            "info_type": "NSP",
            "info_prefixes4": 11000,
            "info_prefixes6": 12000,
            "info_traffic": "1-5Tbps",
            "info_ratio": "Mostly Outbound",
            "info_scope": "Global",
            "info_unicast": True,
            "info_multicast": False,
            "info_ipv6": True,
            "info_never_via_route_servers": True,
            "notes": NOTE,
            "policy_url": "%s/policy" % WEBSITE,
            "policy_general": "Restrictive",
            "policy_locations": "Required - International",
            "policy_ratio": True,
            "policy_contracts": "Required",
            "allow_ixp_update": True,
        }
        data.update(**kwargs)
        return data
##########################################################################
@classmethod
def make_data_poc(self, **kwargs):
data = {
"net_id": 1,
"role": "Technical",
"visible": "Private",
"name": "NOC",
"phone": PHONE,
"email": EMAIL,
"url": WEBSITE,
}
data.update(**kwargs)
return data
##########################################################################
    @classmethod
    def make_data_ixlan(self, **kwargs):
        """Build a valid ixlan payload; kwargs override the defaults."""
        data = {
            "ix_id": 1,
            "id": 1,
            "name": self.make_name("Test"),
            "descr": NOTE,
            "mtu": 12345,
            "dot1q_support": False,
            "ixf_ixp_member_list_url_visible": "Private",
            "rs_asn": 12345,
            "arp_sponge": None,
        }
        # keep the ixlan id in sync with its parent exchange id when the
        # caller specifies one
        if "ix_id" in kwargs:
            data["id"] = kwargs.get("ix_id")
        data.update(**kwargs)
        return data
##########################################################################
    @classmethod
    def make_data_ixpfx(self, **kwargs):
        """Build a valid ixlan prefix payload; kwargs override defaults."""
        data = {
            "ixlan_id": SHARED["ixlan_r_ok"].id,
            "protocol": "IPv4",
            "prefix": "10.%d.10.0/23" % (self.PREFIX_COUNT + 1),
            "in_dfz": True,
        }
        # only consume a counter slot when the caller did not supply an
        # explicit prefix, so the auto-generated defaults stay unique
        if "prefix" not in kwargs:
            self.PREFIX_COUNT += 1
        data.update(**kwargs)
        return data
##########################################################################
    @classmethod
    def make_data_netixlan(self, rename={}, **kwargs):
        """Build a valid netixlan payload; kwargs override defaults and
        ``rename`` maps payload keys to alternate field names."""
        data = {
            "net_id": SHARED["net_r_ok"].id,
            "ixlan_id": SHARED["ixlan_r_ok"].id,
            "notes": NOTE,
            "speed": 30000,
            "asn": 12345,
        }
        data.update(**kwargs)
        for k, v in list(rename.items()):
            data[v] = data[k]
            del data[k]
        # allocate fresh, unused addresses from the target ixlan's
        # prefixes (note: rename may not be applied to ixlan_id here)
        data.update(
            ipaddr4=self.get_ip4(IXLan.objects.get(id=data["ixlan_id"])),
            ipaddr6=self.get_ip6(IXLan.objects.get(id=data["ixlan_id"])),
        )
        return data
##########################################################################
@classmethod
def make_name(self, name):
return f"api-test:{name}:{uuid.uuid4()}"
##########################################################################
    @classmethod
    def serializer_related_fields(cls, serializer_class):
        """
        Returns declared relation fields on the provided serializer class
        Returned value will be a tuple in which the first item is a list of
        field names for primary key related fields and the second item is a list
        of fields names for related sets
        """
        pk_rel = []
        nested_rel = []
        for name, fld in list(serializer_class._declared_fields.items()):
            if type(fld) == serializers.PrimaryKeyRelatedField:
                # fields are declared as "<relation>_id"; strip the
                # suffix to get the relation name
                pk_rel.append(name[:-3])
            elif isinstance(fld, serializers.ListSerializer):
                # nested set: keep the field name and its child serializer
                nested_rel.append((name, fld.child))
        return (pk_rel, nested_rel)
##########################################################################
def assert_handleref_integrity(self, data):
"""
here we assert the integrity of a handleref (which is
the base of all the models exposed on the api)
we do this by making sure all of the handleref fields
exist in the data
"""
self.assertIn("status", data)
# self.assertIn("version", data)
self.assertIn("id", data)
self.assertIn("created", data)
self.assertIn("updated", data)
self.assertNotEqual("created", None)
##########################################################################
def assert_data_integrity(self, data, typ, ignore=[]):
if hasattr(self, "make_data_%s" % typ):
msg = "data integrity failed on key '%s'"
func = getattr(self, "make_data_%s" % typ)
for k, v in list(func().items()):
if k in ignore:
continue
if type(v) in [str, str]:
self.assertIn(type(data.get(k)), [str, str], msg=msg % k)
elif type(v) in [int, int]:
self.assertIn(type(data.get(k)), [int, int], msg=msg % k)
else:
self.assertEqual(type(v), type(data.get(k)), msg=msg % k)
##########################################################################
def assert_get_single(self, data):
self.assertEqual(len(data), 1)
return data[0]
##########################################################################
    def assert_get_forbidden(self, db, typ, id):
        # retrieving this object must be denied for the given client
        with pytest.raises(PermissionDeniedException):
            db.get(typ, id)
##########################################################################
    def assert_get_handleref(self, db, typ, id):
        # fetch a single object and validate handleref + payload
        # integrity, returning the object's data
        data = self.assert_get_single(db.get(typ, id))
        self.assert_handleref_integrity(data)
        self.assert_data_integrity(data, typ)
        return data
##########################################################################
def assert_existing_fields(self, a, b, ignore={}):
for k, v in list(a.items()):
if ignore and k in ignore:
continue
if k in ["suggest"]:
continue
self.assertEqual(v, b.get(k))
##########################################################################
    def assert_delete(
        self, db, typ, test_success=None, test_failure=None, test_protected=None
    ):
        """
        Test deletion of objects of type `typ` through client `db`.

        - test_success: id expected to delete cleanly (verified gone)
        - test_failure: id whose deletion must be denied by permissions
        - test_protected: id whose deletion must be blocked by object
          protection, which is also expected to open a deskpro ticket
        """
        if test_success:
            db.rm(typ, test_success)
            with pytest.raises(NotFoundException):
                self.assert_get_handleref(db, typ, test_success)
        if test_failure:
            with pytest.raises(PermissionDeniedException):
                db.rm(typ, test_failure)
            # object should still exist; a denied read is also acceptable
            try:
                self.assert_get_handleref(db, typ, test_failure)
            except PermissionDeniedException:
                pass
        if test_protected:
            with pytest.raises(PermissionDeniedException):
                db.rm(typ, test_protected)
            # protected deletions notify support via a deskpro ticket
            assert DeskProTicket.objects.filter(
                subject__icontains=f"{typ}-{test_protected}"
            ).exists()
##########################################################################
    def assert_create(
        self, db, typ, data, test_failures=None, test_success=True, **kwargs
    ):
        """
        Test creation of an object of type `typ` through client `db`.

        When `test_success` is truthy the object is created and its
        returned fields are compared against `data`; queue-enabled types
        must come back with status "pending", all others "ok".

        `test_failures` may contain "invalid", "status" and/or "perms"
        keys mapping to field overrides expected to make the creation
        fail in the corresponding way.

        Returns the created object's data ({} when test_success is falsy).
        """
        if test_success:
            r_data = self.assert_get_single(
                db.create(typ, data, return_response=True).get("data")
            )
            self.assert_existing_fields(data, r_data, ignore=kwargs.get("ignore"))
            self.assertGreater(r_data.get("id"), 0)
            # queue-enabled object types enter the verification queue and
            # are returned as "pending" rather than "ok"
            status_checked = False
            for model in QUEUE_ENABLED:
                if hasattr(model, "handleref") and model.handleref.tag == typ:
                    self.assertEqual(r_data.get("status"), "pending")
                    status_checked = True
            if not status_checked:
                self.assertEqual(r_data.get("status"), "ok")
        else:
            r_data = {}
        # if test_failures is set we want to test fail conditions
        if test_failures:
            # we test fail because of invalid data
            if "invalid" in test_failures:
                tests = test_failures["invalid"]
                # allow a single override dict as well as a list of them
                if not isinstance(tests, list):
                    tests = [tests]
                for test in tests:
                    data_invalid = copy.copy(data)
                    for k, v in list(test.items()):
                        data_invalid[k] = v
                    with pytest.raises(InvalidRequestException) as excinfo:
                        r = db.create(typ, data_invalid, return_response=True)
                    assert "400 Bad Request" in str(excinfo.value)
            # we test fail because of parent entity status
            if "status" in test_failures:
                data_status = copy.copy(data)
                for k, v in list(test_failures["status"].items()):
                    data_status[k] = v
                with pytest.raises(InvalidRequestException) as excinfo:
                    r = db.create(typ, data_status, return_response=True)
                assert "not yet been approved" in str(excinfo.value)
            # we test fail because of permissions
            if "perms" in test_failures:
                data_perms = copy.copy(data)
                for k, v in list(test_failures["perms"].items()):
                    data_perms[k] = v
                with pytest.raises(PermissionDeniedException):
                    db.create(typ, data_perms, return_response=True)
        return r_data
##########################################################################
    def assert_create_status_failure(self, db, typ, data):
        """
        Wrapper for assert_create for assertion of status failure
        (creation blocked because the parent entity is not approved)
        """
        self.assert_create(
            db, typ, data, test_failures={"status": {}}, test_success=False
        )
##########################################################################
    def assert_update(self, db, typ, id, data, test_failures=False, test_success=True):
        """
        Test update of object `id` of type `typ` through client `db`.

        When `test_success` is truthy the object is fetched, patched
        with `data` and re-fetched; if test_success is a list of
        callables each is invoked with (data, updated_data) instead of
        the default per-field equality check.

        `test_failures` may contain "invalid", "perms" and/or
        "readonly" keys mapping to field overrides expected to fail or,
        for "readonly", be silently ignored by the server.
        """
        if test_success:
            orig = self.assert_get_handleref(db, typ, id)
            orig.update(**data)
        else:
            orig = {"id": id}
            orig.update(**data)
        # when both "<rel>_id" and the expanded "<rel>" key are present,
        # drop the expanded object so only the id is sent
        for k, v in list(orig.items()):
            if k[-3:] == "_id" and k[:-3] in orig:
                del orig[k[:-3]]
        if test_success:
            db.update(typ, **orig)
            u_data = self.assert_get_handleref(db, typ, id)
            if type(test_success) == list:
                for test in test_success:
                    if test and callable(test):
                        test(data, u_data)
            else:
                # self.assertGreater(u_data["version"], orig["version"])
                for k, v in list(data.items()):
                    self.assertEqual(u_data.get(k), v)
        # if test_failures is set we want to test fail conditions
        if test_failures:
            # we test fail because of invalid data
            if "invalid" in test_failures:
                tests = test_failures["invalid"]
                # `invalid` test_failures can be a list to
                # test multiple instances of invalid values
                # however we also support passing a single
                # dict of fields, in which case we wrap
                # it in a list here.
                if not isinstance(tests, list):
                    tests = [tests]
                for test in tests:
                    data_invalid = copy.copy(orig)
                    for k, v in list(test.items()):
                        data_invalid[k] = v
                    with pytest.raises(InvalidRequestException) as excinfo:
                        db.update(typ, **data_invalid)
                    assert "400 Bad Request" in str(excinfo.value)
            # we test fail because of permissions
            if "perms" in test_failures:
                data_perms = copy.copy(orig)
                for k, v in list(test_failures["perms"].items()):
                    data_perms[k] = v
                # if data is empty set something so we don't
                # trigger the empty data error
                data_perms["_dummy_"] = 1
                with pytest.raises(PermissionDeniedException):
                    db.update(typ, **data_perms)
            # we test failure to update readonly fields
            if "readonly" in test_failures:
                data_ro = copy.copy(orig)
                b_data = self.assert_get_handleref(db, typ, id)
                data_ro.update(**test_failures["readonly"])
                db.update(typ, **data_ro)
                u_data = self.assert_get_handleref(db, typ, id)
                # readonly fields must retain their pre-update values
                for k, v in list(test_failures["readonly"].items()):
                    self.assertEqual(u_data.get(k), b_data.get(k))
##########################################################################
    def assert_list_filter_related(
        self, target, rel, fld="id", valid=None, valid_m=None
    ):
        """
        Test filtering a `target` listing by a related `rel` field,
        both with an exact value and with an __in (multi value) query,
        asserting that only the expected objects are returned.
        """
        # if not valid:
        #    valid = [o.id for k, o in SHARED.items() if type(
        #        o) != int and k.find("%s_" % target) == 0]
        if fld != "id":
            qfld = "_%s" % fld
        else:
            qfld = fld
        # filter values: the shared read-only and read-write objects
        ids = [
            getattr(SHARED["%s_r_ok" % rel], fld),
            getattr(SHARED["%s_rw_ok" % rel], fld),
        ]
        kwargs_s = {f"{rel}_{qfld}": getattr(SHARED["%s_r_ok" % rel], fld)}
        kwargs_m = {f"{rel}_{qfld}__in": ",".join([str(id) for id in ids])}
        # build the sets of expected ids; when the relation is a real
        # model field we can query the ORM directly, otherwise fall back
        # to the known shared fixtures
        attr = getattr(REFTAG_MAP[target], rel, None)
        if attr and not isinstance(attr, property):
            valid_s = [
                r.id
                for r in REFTAG_MAP[target]
                .objects.filter(**kwargs_s)
                .filter(status="ok")
            ]
            valid_m = [
                r.id
                for r in REFTAG_MAP[target]
                .objects.filter(**{f"{rel}_{qfld}__in": ids})
                .filter(status="ok")
            ]
        elif target == "poc":
            valid_s = [SHARED["%s_r_ok_public" % target].id]
            valid_m = [
                SHARED["%s_r_ok_public" % target].id,
                SHARED["%s_rw_ok_public" % target].id,
            ]
        elif target == "ixpfx":
            valid_s = [
                SHARED["%s_r_ok" % target].id,
                SHARED["%s_r_v6_ok" % target].id,
            ]
            valid_m = [
                SHARED["%s_r_ok" % target].id,
                SHARED["%s_rw_ok" % target].id,
                SHARED["%s_r_v6_ok" % target].id,
                SHARED["%s_rw_v6_ok" % target].id,
            ]
        else:
            valid_s = [SHARED["%s_r_ok" % target].id]
            valid_m = [SHARED["%s_r_ok" % target].id, SHARED["%s_rw_ok" % target].id]
        # exact
        data = self.db_guest.all(target, **kwargs_s)
        self.assertGreater(len(data), 0)
        for row in data:
            self.assert_data_integrity(row, target)
            self.assertIn(row["id"], valid_s)
        # in
        data = self.db_guest.all(target, **kwargs_m)
        self.assertGreater(len(data), 0)
        for row in data:
            self.assert_data_integrity(row, target)
            self.assertIn(row["id"], valid_m)
##########################################################################
    def assert_related_depth(
        self,
        obj,
        serializer_class,
        r_depth,
        t_depth,
        note_tag,
        typ="listing",
        list_exclude=[],
    ):
        """
        Assert the data integrity of structures within a result that have
        been expanded via the depth parameter

        r_depth is the remaining depth at this level of recursion,
        t_depth the total depth that was requested.
        """
        # get all the relationship properties declared in the serializer
        pk_flds, n_flds = self.serializer_related_fields(serializer_class)
        # some tag so we can track where the assertions fail since this will
        # be doing nested checks
        note_tag = "%s(%d/%d)" % (note_tag, r_depth, t_depth)
        # first check that the provided object is not None, as this should
        # never be the case
        self.assertNotEqual(obj, None, msg=note_tag)
        # single primary key relation fields
        for pk_fld in pk_flds:
            # serializer has marked field as to be excluded from serialized data
            # don't check for it
            if pk_fld in list_exclude:
                continue
            if typ == "listing":
                # in listing mode, depth should never expand pk relations
                self.assertEqual(
                    obj.get(pk_fld), None, msg=f"PK Relation {note_tag} {pk_fld}"
                )
            else:
                # in single get mode, expand everything as long as we are at
                # a relative depth greater than 1
                if r_depth >= 1:
                    self.assert_related_depth(
                        obj.get(pk_fld),
                        REFTAG_MAP_SLZ.get(pk_fld),
                        r_depth - 1,
                        t_depth,
                        f"{note_tag}.{pk_fld}",
                        typ=typ,
                    )
                else:
                    self.assertIn(
                        type(obj.get(pk_fld)),
                        [int, type(None)],
                        msg=f"PK Relation {note_tag} {pk_fld}",
                    )
        # nested set relations
        for n_fld, n_fld_cls in n_flds:
            if r_depth > 1:
                # sets should be expanded to objects
                self.assertIn(
                    n_fld, obj, msg=f"Nested set existing (dN) {note_tag} {n_fld}"
                )
                # make sure set exists and is of the correct type
                self.assertEqual(
                    type(obj[n_fld]),
                    list,
                    msg=f"Nested set list type (dN) {note_tag} {n_fld}",
                )
                # assert further depth expansions on all expanded objects in
                # the set
                for row in obj[n_fld]:
                    self.assert_related_depth(
                        row,
                        n_fld_cls,
                        r_depth - 2,
                        t_depth,
                        f"{note_tag}.{n_fld}",
                        typ=typ,
                        list_exclude=getattr(n_fld_cls.Meta, "list_exclude", []),
                    )
            elif r_depth == 1:
                # sets should be expanded to ids
                self.assertIn(
                    n_fld, obj, msg=f"Nested set existing (d1) {note_tag} {n_fld}"
                )
                # make sure set exists and is of the correct type
                self.assertEqual(
                    type(obj[n_fld]),
                    list,
                    msg=f"Nested set list type (d1) {note_tag} {n_fld}",
                )
                # make all values in the set are of type int or long
                for row in obj[n_fld]:
                    self.assertIn(
                        type(row),
                        [int, int],
                        msg=f"Nested set containing ids (d1) {note_tag} {n_fld}",
                    )
            else:
                # sets should not exist
                self.assertNotIn(
                    n_fld,
                    obj,
                    msg=f"Netsted set not existing (d0) {note_tag} {n_fld}",
                )
##########################################################################
# TESTS WITH USER THAT IS NOT A MEMBER OF AN ORGANIZATION
##########################################################################
    def test_user_001_GET_org(self):
        # plain authenticated users may read organizations
        self.assert_get_handleref(self.db_user, "org", SHARED["org_r_ok"].id)
    ##########################################################################
    def test_user_001_GET_net(self):
        data = self.assert_get_handleref(self.db_user, "net", SHARED["net_r_ok"].id)
        # authenticated users should see the network's contacts
        self.assertNotEqual(len(data.get("poc_set")), 0)
    ##########################################################################
    def test_user_001_GET_ix(self):
        self.assert_get_handleref(self.db_user, "ix", SHARED["ix_r_ok"].id)
    ##########################################################################
    def test_user_001_GET_ix_net_count(self):
        data = self.assert_get_handleref(self.db_user, "ix", SHARED["ix_r_ok"].id)
        self.assertEqual(data.get("net_count"), 1)
    ##########################################################################
    def test_user_001_GET_fac(self):
        self.assert_get_handleref(self.db_user, "fac", SHARED["fac_r_ok"].id)
    ##########################################################################
    def test_user_001_GET_fac_netcount(self):
        data = self.assert_get_handleref(self.db_user, "fac", SHARED["fac_r_ok"].id)
        self.assertEqual(data.get("net_count"), 1)
    ##########################################################################
    def test_user_001_GET_poc_public(self):
        self.assert_get_handleref(self.db_user, "poc", SHARED["poc_r_ok_public"].id)
    ##########################################################################
    def test_user_001_GET_poc_users(self):
        self.assert_get_handleref(self.db_user, "poc", SHARED["poc_r_ok_users"].id)
    ##########################################################################
    def test_user_001_GET_poc_private(self):
        # private pocs are hidden from users outside the owning org
        self.assert_get_forbidden(self.db_user, "poc", SHARED["poc_r_ok_private"].id)
    ##########################################################################
    def test_user_001_GET_nefac(self):
        self.assert_get_handleref(self.db_user, "netfac", SHARED["netfac_r_ok"].id)
    ##########################################################################
    def test_user_001_GET_netixlan(self):
        self.assert_get_handleref(self.db_user, "netixlan", SHARED["netixlan_r_ok"].id)
    ##########################################################################
    def test_user_001_GET_ixfac(self):
        self.assert_get_handleref(self.db_user, "ixfac", SHARED["ixfac_r_ok"].id)
    ##########################################################################
    def test_user_001_GET_ixlan(self):
        self.assert_get_handleref(self.db_user, "ixlan", SHARED["ixlan_r_ok"].id)
    ##########################################################################
    def test_user_001_GET_ixlan_ixf_ixp_member_list_url(self):
        # the member list url is only exposed to users when its
        # visibility is Public or Users
        for ixlan in self.db_user.all(
            "ixlan", ixf_ixp_member_list_url__startswith="http"
        ):
            if ixlan["ixf_ixp_member_list_url_visible"] in ["Public", "Users"]:
                assert ixlan["ixf_ixp_member_list_url"] == "http://localhost"
            else:
                assert "ixf_ixp_member_list_url" not in ixlan
    ##########################################################################
    def test_user_001_GET_ixpfx(self):
        self.assert_get_handleref(self.db_user, "ixpfx", SHARED["ixpfx_r_ok"].id)
    ##########################################################################
    def test_user_005_list_poc(self):
        # listings should never contain private pocs for plain users
        data = self.db_guest.all("poc", limit=1000)
        for row in data:
            self.assertIn(row.get("visible"), ["Users", "Public"])
        data = self.db_guest.all("poc", visible="Private", limit=100)
        self.assertEqual(0, len(data))
##########################################################################
def test_user_001_GET_as_set(self):
data = self.db_guest.all("as_set")
networks = Network.objects.filter(status="ok")
print(data)
for net in networks:
self.assertEqual(data[0].get(f"{net.asn}"), net.irr_as_set)
##########################################################################
# TESTS WITH USER THAT IS ORGANIZATION MEMBER
##########################################################################
    def test_org_member_001_GET_poc_public(self):
        self.assert_get_handleref(
            self.db_org_member, "poc", SHARED["poc_r_ok_public"].id
        )
    ##########################################################################
    def test_org_member_001_GET_poc_users(self):
        self.assert_get_handleref(
            self.db_org_member, "poc", SHARED["poc_r_ok_users"].id
        )
    ##########################################################################
    def test_org_member_001_GET_poc_private(self):
        # org members can read their own org's private pocs
        self.assert_get_handleref(
            self.db_org_member, "poc", SHARED["poc_r_ok_private"].id
        )
    #########################################################################
    def test_org_member_001_GET_ixlan_ixf_ixp_member_list_url(self):
        # members additionally see the member list url of their own
        # org's ixlan (ixlan_r3_ok) even when its visibility is not
        # Public/Users
        for ixlan in self.db_org_member.all(
            "ixlan", ixf_ixp_member_list_url__startswith="http"
        ):
            if ixlan["ixf_ixp_member_list_url_visible"] in ["Public", "Users"]:
                assert ixlan["ixf_ixp_member_list_url"] == "http://localhost"
            else:
                if ixlan["id"] == SHARED["ixlan_r3_ok"].id:
                    assert ixlan["ixf_ixp_member_list_url"] == "http://localhost"
                else:
                    assert "ixf_ixp_member_list_url" not in ixlan
    #########################################################################
    def test_org_member_001_POST_ix_with_perms(self):
        # a member explicitly granted create permission on the org can
        # create an exchange under it
        data = self.make_data_ix(prefix=self.get_prefix4())
        org = SHARED["org_rw_ok"]
        org.usergroup.user_set.add(self.user_org_member)
        self.user_org_member.grainy_permissions.add_permission(org, "cr")
        r_data = self.assert_create(
            self.db_org_member,
            "ix",
            data,
            ignore=["prefix"],
        )
##########################################################################
# TESTS WITH USER THAT IS ORGANIZATION ADMINISTRATOR
##########################################################################
##########################################################################
def test_org_admin_001_GET_poc_public(self):
self.assert_get_handleref(
self.db_org_admin, "poc", SHARED["poc_r_ok_public"].id
)
##########################################################################
def test_org_admin_001_GET_poc_users(self):
self.assert_get_handleref(self.db_org_admin, "poc", SHARED["poc_r_ok_users"].id)
##########################################################################
def test_org_admin_001_GET_poc_private(self):
# org admin is admin of rw org, so trying to access the private poc of the r org
# should still be forbidden
self.assert_get_forbidden(
self.db_org_admin, "poc", SHARED["poc_r_ok_private"].id
)
#########################################################################
def test_org_admin_001_GET_ixlan_ixf_ixp_member_list_url(self):
for ixlan in self.db_org_admin.all(
"ixlan", ixf_ixp_member_list_url__startswith="http"
):
if ixlan["ixf_ixp_member_list_url_visible"] in ["Public", "Users"]:
assert ixlan["ixf_ixp_member_list_url"] == "http://localhost"
else:
if ixlan["id"] == SHARED["ixlan_rw3_ok"].id:
assert ixlan["ixf_ixp_member_list_url"] == "http://localhost"
else:
assert "ixf_ixp_member_list_url" not in ixlan
##########################################################################
    def test_org_admin_002_POST_PUT_DELETE_ix(self):
        """Full CRUD cycle for an exchange as org admin.

        Covers create with invalid/perms/status failure cases, the
        ixlan/ixpfx ids returned on POST (#609), update with readonly
        field checks, delete, several invalid-create payloads, ipv6
        prefix creation, and protected-object delete validation.
        """
        data = self.make_data_ix(prefix=self.get_prefix4())
        r_data = self.assert_create(
            self.db_org_admin,
            "ix",
            data,
            ignore=["prefix"],
            test_failures={
                "invalid": {"prefix": self.get_prefix4(), "name": ""},
                "perms": {
                    "prefix": self.get_prefix4(),
                    # need to set name again so it doesnt fail unique validation
                    "name": self.make_name("Test"),
                    # set org to an organization the user doesnt have perms to
                    "org_id": SHARED["org_r_ok"].id,
                },
                "status": {
                    # need to set name again so it doesnt fail unique validation
                    "prefix": self.get_prefix4(),
                    "name": self.make_name("Test"),
                    "org_id": SHARED["org_rwp"].id,
                },
            },
        )
        # test that ixlan id and prefix id were returned in the POST
        # response (see #609)
        assert r_data.get("ixlan_id") > 0
        assert r_data.get("ixpfx_id") > 0
        SHARED["ix_id"] = r_data.get("id")
        # make sure ixlan was created and has matching id
        ix = InternetExchange.objects.get(id=SHARED["ix_id"])
        assert ix.ixlan
        assert ix.ixlan.id == ix.id
        self.assert_update(
            self.db_org_admin,
            "ix",
            SHARED["ix_id"],
            {"name": self.make_name("Test")},
            test_failures={
                "invalid": {"name": ""},
                "perms": {"id": SHARED["ix_r_ok"].id},
                "readonly": {"ixf_net_count": 50, "ixf_last_import": "not even valid"},
            },
        )
        self.assert_delete(
            self.db_org_admin,
            "ix",
            test_success=SHARED["ix_id"],
            test_failure=SHARED["ix_r_ok"].id,
        )
        # a blank tech_email must be rejected on create
        self.assert_create(
            self.db_org_admin,
            "ix",
            data,
            test_success=False,
            test_failures={
                "invalid": {
                    "prefix": self.get_prefix4(),
                    "tech_email": "",
                },
            },
        )
        # a blank website must be rejected on create
        self.assert_create(
            self.db_org_admin,
            "ix",
            data,
            test_success=False,
            test_failures={
                "invalid": {
                    "prefix": self.get_prefix4(),
                    "website": "",
                },
            },
        )
        # a blank prefix must be rejected on create
        self.assert_create(
            self.db_org_admin,
            "ix",
            data,
            test_success=False,
            test_failures={
                "invalid": {"prefix": ""},
            },
        )
        # test ix creation with a ipv6 prefix
        data = self.make_data_ix(prefix=self.get_prefix6())
        self.assert_create(self.db_org_admin, "ix", data, ignore=["prefix"])
        # check protected ix validation
        self.assert_delete(
            self.db_org_admin,
            "ix",
            test_protected=SHARED["ix_rw_ok"].id,
        )
##########################################################################
    def test_org_admin_002_POST_PUT_DELETE_fac(self):
        """Full CRUD cycle for a facility as org admin.

        Covers create failure cases, readonly fields on update
        (latitude/longitude/rencode), delete, protected-object delete,
        and that a client-supplied rencode is discarded on create.
        """
        data = self.make_data_fac()
        r_data = self.assert_create(
            self.db_org_admin,
            "fac",
            data,
            test_failures={
                "invalid": [
                    {"name": ""},
                    {"name": self.make_name("Test"), "website": ""},
                ],
                "perms": {
                    # need to set name again so it doesnt fail unique validation
                    "name": self.make_name("Test"),
                    # set org to an organization the user doesnt have perms to
                    "org_id": SHARED["org_r_ok"].id,
                },
                "status": {
                    "name": self.make_name("Test"),
                    "org_id": SHARED["org_rwp"].id,
                },
            },
        )
        SHARED["fac_id"] = r_data.get("id")
        self.assert_update(
            self.db_org_admin,
            "fac",
            SHARED["fac_id"],
            {"name": self.make_name("Test")},
            test_failures={
                "invalid": {"name": ""},
                "perms": {"id": SHARED["fac_r_ok"].id},
                "readonly": {
                    "latitude": 1,  # this should not take as it is read only
                    "longitude": 1,  # this should not take as it is read only
                    "rencode": str(uuid.uuid4())[
                        :6
                    ].upper(),  # this should not take as it is read only
                },
            },
        )
        self.assert_delete(
            self.db_org_admin,
            "fac",
            test_success=SHARED["fac_id"],
            test_failure=SHARED["fac_r_ok"].id,
        )
        # check protected fac validation
        self.assert_delete(
            self.db_org_admin,
            "fac",
            test_protected=SHARED["fac_rw_ok"].id,
        )
        # Create new data with a non-null rencode
        data_new = self.make_data_fac()
        obsolete_rencode = str(uuid.uuid4())[:6].upper()
        data_new["rencode"] = obsolete_rencode
        # Data should be successfully created
        r_data_new = self.assert_get_single(
            self.db_org_admin.create("fac", data_new, return_response=True).get("data")
        )
        # But rencode should be null
        assert r_data_new["rencode"] == ""
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_fac_zipcode(self):
data = self.make_data_fac()
# Requires a zipcode if country is a country
# with postal codes (ie US)
r_data = self.assert_create(
self.db_org_admin,
"fac",
data,
test_failures={
"invalid": [
{"name": self.make_name("Test"), "zipcode": ""},
],
},
test_success=False,
)
# Change to country w/o postal codes
data["country"] = "ZW"
data["zipcode"] = ""
r_data = self.assert_create(
self.db_org_admin,
"fac",
data,
)
assert r_data["zipcode"] == ""
##########################################################################
    def test_org_admin_002_POST_PUT_DELETE_net(self):
        """Full CRUD cycle for a network as org admin.

        Covers create failure cases, update (including that asn is
        immutable), delete, and the RiR-not-found create failure.
        """
        data = self.make_data_net(asn=9000900)
        r_data = self.assert_create(
            self.db_org_admin,
            "net",
            data,
            test_failures={
                "invalid": {"name": ""},
                "perms": {
                    # need to set name again so it doesnt fail unique validation
                    "name": self.make_name("Test"),
                    "asn": data["asn"] + 1,
                    # set org to an organization the user doesnt have perms to
                    "org_id": SHARED["org_r_ok"].id,
                },
                "status": {
                    "org_id": SHARED["org_rwp"].id,
                    "asn": data["asn"] + 1,
                    "name": self.make_name("Test"),
                },
            },
        )
        SHARED["net_id"] = r_data.get("id")
        self.assert_update(
            self.db_org_admin,
            "net",
            SHARED["net_id"],
            {"name": self.make_name("Test")},
            test_failures={
                "invalid": {"name": ""},
                "perms": {"id": SHARED["net_r_ok"].id},
            },
        )
        # Test ASN cannot update
        self.assert_update(
            self.db_org_admin,
            "net",
            SHARED["net_id"],
            data,
            test_failures={
                "invalid": {"asn": data["asn"] + 1},
            },
        )
        self.assert_delete(
            self.db_org_admin,
            "net",
            test_success=SHARED["net_id"],
            test_failure=SHARED["net_r_ok"].id,
        )
        # Test RiR not found failure
        r_data = self.assert_create(
            self.db_org_admin,
            "net",
            data,
            test_failures={"invalid": {"asn": 9999999}},
            test_success=False,
        )
##########################################################################
def test_org_admin_002_POST_net_looking_glass_url(self):
for scheme in ["http", "https", "ssh", "telnet"]:
r_data = self.assert_create(
self.db_org_admin,
"net",
self.make_data_net(asn=9000900, looking_glass=f"{scheme}://foo.bar"),
test_failures={"invalid": {"looking_glass": "foo://www.bar.com"}},
)
Network.objects.get(id=r_data["id"]).delete(hard=True)
##########################################################################
def test_org_admin_002_POST_net_route_server_url(self):
for scheme in ["http", "https", "ssh", "telnet"]:
r_data = self.assert_create(
self.db_org_admin,
"net",
self.make_data_net(asn=9000900, route_server=f"{scheme}://foo.bar"),
test_failures={"invalid": {"route_server": "foo://www.bar.com"}},
)
Network.objects.get(id=r_data["id"]).delete(hard=True)
##########################################################################
def test_org_admin_002_POST_net_deleted(self):
data = self.make_data_net(asn=SHARED["net_rw_dupe_deleted"].asn)
with pytest.raises(InvalidRequestException) as excinfo:
r_data = self.db_org_admin.create("net", data, return_response=True)
# check exception vs value
assert "Network has been deleted. Please contact" in excinfo.value.extra["asn"]
##########################################################################
def test_org_admin_002_POST_PUT_DELETE_as_set(self):
"""
The as-set endpoint is readonly, so all of these should
fail
"""
data = self.make_data_net(asn=9000900)
with pytest.raises(PermissionDeniedException) as excinfo:
r_data = self.assert_create(self.db_org_admin, "as_set", data)
assert "You do not have permission" in str(excinfo.value)
with pytest.raises(PermissionDeniedException) as excinfo:
self.db_org_admin.update("as_set", {"9000900": "AS-ZZZ"})
assert "You do not have permission" in str(excinfo.value)
with pytest.raises(PermissionDeniedException) as excinfo:
self.db_org_admin.rm("as_set", SHARED["net_rw_ok"].asn)
assert "You do not have permission" in str(excinfo.value)
##########################################################################
def test_org_admin_002_POST_net_bogon_asn(self):
# Test bogon asn failure
data = self.make_data_net()
for bogon_asn in inet.BOGON_ASN_RANGES:
r_data = self.assert_create(
self.db_org_admin,
"net",
data,
test_failures={"invalid": {"asn": bogon_asn[0]}},
test_success=False,
)
# server running in tutorial mode should be allowed
# to create networks with bogon asns, so we test that
# as well
pdb_settings.TUTORIAL_MODE = True
for bogon_asn in inet.TUTORIAL_ASN_RANGES:
data = self.make_data_net(asn=bogon_asn[0])
r_data = self.assert_create(self.db_org_admin, "net", data)
pdb_settings.TUTORIAL_MODE = False
##########################################################################
    def test_org_admin_002_POST_PUT_DELETE_netfac(self):
        """Full CRUD cycle for a netfac (network <-> facility link).

        Covers create failure cases, that updating the link keys is
        rejected, delete, and that a deleted netfac can be re-created.
        """
        data = {
            "net_id": SHARED["net_rw_ok"].id,
            "fac_id": SHARED["fac_rw_ok"].id,
        }
        r_data = self.assert_create(
            self.db_org_admin,
            "netfac",
            data,
            test_failures={
                "invalid": {"net_id": ""},
                "perms": {
                    # set network to one the user doesnt have perms to
                    "net_id": SHARED["net_r_ok"].id
                },
                "status": {
                    "net_id": SHARED["net_rw_pending"].id,
                    "fac_id": SHARED["fac_rw_pending"].id,
                },
            },
        )
        SHARED["netfac_id"] = r_data.get("id")
        self.assert_update(
            self.db_org_admin,
            "netfac",
            SHARED["netfac_id"],
            data,
            test_success=False,
            test_failures={
                "invalid": {"fac_id": ""},
                "perms": {"net_id": SHARED["net_r_ok"].id},
            },
        )
        self.assert_delete(
            self.db_org_admin,
            "netfac",
            test_success=SHARED["netfac_id"],
            test_failure=SHARED["netfac_r_ok"].id,
        )
        # re-create deleted netfac
        r_data = self.assert_create(self.db_org_admin, "netfac", data)
        # re-delete
        self.assert_delete(
            self.db_org_admin, "netfac", test_success=SHARED["netfac_id"]
        )
##########################################################################
    def test_org_admin_002_POST_PUT_DELETE_poc(self):
        """Full CRUD cycle for a point of contact.

        Also verifies that a soft-deleted poc returns blank values for
        its sensitive fields (#569).
        """
        data = self.make_data_poc(net_id=SHARED["net_rw_ok"].id)
        r_data = self.assert_create(
            self.db_org_admin,
            "poc",
            data,
            test_failures={
                "invalid": {"net_id": ""},
                "perms": {
                    # set network to one the user doesnt have perms to
                    "net_id": SHARED["net_r_ok"].id
                },
                "status": {"net_id": SHARED["net_rw_pending"].id},
            },
        )
        SHARED["poc_id"] = r_data.get("id")
        self.assert_update(
            self.db_org_admin,
            "poc",
            SHARED["poc_id"],
            {"role": "Sales"},
            test_failures={
                "invalid": {"role": "NOPE"},
                "perms": {"net_id": SHARED["net_r_ok"].id},
            },
        )
        self.assert_delete(
            self.db_org_admin,
            "poc",
            test_success=SHARED["poc_id"],
            test_failure=SHARED["poc_r_ok_users"].id,
        )
        # soft-deleted pocs should return blank
        # values for sensitive fields (#569)
        poc = self.db_org_admin.all("poc", id=SHARED["poc_id"], since=1)[0]
        assert poc["name"] == ""
        assert poc["phone"] == ""
        assert poc["email"] == ""
        assert poc["url"] == ""
##########################################################################
    def test_org_admin_002_POST_PUT_DELETE_ixlan(self):
        """ixlan objects are managed implicitly with their exchange:
        POST and DELETE are rejected with method-not-allowed, only
        update is permitted."""
        data = self.make_data_ixlan(ix_id=SHARED["ix_rw_ok"].id)
        # direct creation must be rejected
        with self.assertRaises(Exception) as exc:
            r_data = self.assert_create(
                self.db_org_admin,
                "ixlan",
                data,
                test_failures={
                    "invalid": {"ix_id": ""},
                    "perms": {"ix_id": SHARED["ix_r_ok"].id},
                    "status": {"ix_id": SHARED["ix_rw_pending"].id},
                },
            )
        self.assertIn('Method "POST" not allowed', str(exc.exception))
        # updates are allowed, with validation / perm failure cases
        self.assert_update(
            self.db_org_admin,
            "ixlan",
            SHARED["ixlan_rw_ok"].id,
            {"name": self.make_name("Test")},
            test_failures={
                "invalid": {"mtu": "NEEDS TO BE INT"},
                "perms": {"ix_id": SHARED["ix_r_ok"].id},
            },
        )
        # direct deletion must be rejected
        with self.assertRaises(Exception) as exc:
            self.assert_delete(
                self.db_org_admin,
                "ixlan",
                test_success=SHARED["ixlan_rw_ok"].id,
                test_failure=SHARED["ixlan_r_ok"].id,
            )
        self.assertIn('Method "DELETE" not allowed', str(exc.exception))
##########################################################################
    def test_org_admin_002_POST_PUT_DELETE_ixpfx(self):
        """Full CRUD cycle for an ixpfx (exchange prefix).

        Covers create/update failure cases, delete, re-creation of a
        deleted prefix (including one previously owned by a foreign
        exchange), protocol validation, and protected-object delete.
        """
        data = self.make_data_ixpfx(
            ixlan_id=SHARED["ixlan_rw_ok"].id, prefix="206.126.236.0/25"
        )
        r_data = self.assert_create(
            self.db_org_admin,
            "ixpfx",
            data,
            test_failures={
                "invalid": [{"prefix": "127.0.0.0/8"}, {"in_dfz": False}],
                "perms": {
                    "prefix": "205.127.237.0/24",
                    "ixlan_id": SHARED["ixlan_r_ok"].id,
                },
                "status": {
                    "prefix": "205.127.237.0/24",
                    "ixlan_id": SHARED["ixlan_rw_pending"].id,
                },
            },
        )
        SHARED["ixpfx_id"] = r_data["id"]
        self.assert_update(
            self.db_org_admin,
            "ixpfx",
            SHARED["ixpfx_id"],
            {"prefix": "206.127.236.0/26"},
            test_failures={
                "invalid": [{"prefix": "NEEDS TO BE VALID PREFIX"}, {"in_dfz": False}],
                "perms": {"ixlan_id": SHARED["ixlan_r_ok"].id},
            },
        )
        self.assert_delete(
            self.db_org_admin,
            "ixpfx",
            test_success=SHARED["ixpfx_id"],
            test_failure=SHARED["ixpfx_r_ok"].id,
        )
        # re-create deleted ixpfx
        r_data = self.assert_create(self.db_org_admin, "ixpfx", data)
        # re-delete
        self.assert_delete(self.db_org_admin, "ixpfx", test_success=SHARED["ixpfx_id"])
        # re-creating a deleted ixpfx that is under another exchange
        # that we don't have write perms too
        pfx = IXLanPrefix.objects.create(
            ixlan=SHARED["ixlan_r_ok"], prefix="205.127.237.0/24", protocol="IPv4"
        )
        pfx.delete()
        data.update(prefix="205.127.237.0/24")
        r_data = self.assert_create(
            self.db_org_admin,
            "ixpfx",
            data,
        )
        # make sure protocols are validated
        r_data = self.assert_create(
            self.db_org_admin,
            "ixpfx",
            data,
            test_failures={
                "invalid": {"prefix": "207.128.238.0/24", "protocol": "IPv6"},
            },
            test_success=False,
        )
        # test protected ixpfx cant be deleted
        # (a netixlan using an address from the prefix makes it protected)
        prefix = IXLanPrefix.objects.get(id=SHARED["ixpfx_id"])
        NetworkIXLan.objects.create(
            network=SHARED["net_rw_ok"],
            asn=SHARED["net_rw_ok"].asn,
            ixlan=SHARED["ixlan_rw_ok"],
            ipaddr4=prefix.prefix[0],
            status="ok",
            speed=1000,
        )
        self.assert_delete(
            self.db_org_admin, "ixpfx", test_protected=SHARED["ixpfx_id"]
        )
##########################################################################
    def test_org_admin_002_POST_PUT_DELETE_netixlan(self):
        """Full CRUD cycle for a netixlan as org admin; newly created
        entries default to operational."""
        data = self.make_data_netixlan(
            net_id=SHARED["net_rw_ok"].id,
            ixlan_id=SHARED["ixlan_rw_ok"].id,
            asn=SHARED["net_rw_ok"].asn,
        )
        r_data = self.assert_create(
            self.db_org_admin,
            "netixlan",
            data,
            test_failures={
                "invalid": {"ipaddr4": "a b c"},
                "perms": {
                    # set network to one the user doesnt have perms to
                    "ipaddr4": self.get_ip4(SHARED["ixlan_rw_ok"]),
                    "ipaddr6": self.get_ip6(SHARED["ixlan_rw_ok"]),
                    "net_id": SHARED["net_r_ok"].id,
                },
            },
        )
        assert r_data["operational"]
        SHARED["netixlan_id"] = r_data.get("id")
        self.assert_update(
            self.db_org_admin,
            "netixlan",
            SHARED["netixlan_id"],
            {"speed": 2000},
            test_failures={
                "invalid": {"ipaddr4": "NEEDS TO BE VALID IP"},
                "perms": {"net_id": SHARED["net_r_ok"].id},
            },
        )
        self.assert_delete(
            self.db_org_admin,
            "netixlan",
            test_success=SHARED["netixlan_id"],
            test_failure=SHARED["netixlan_r_ok"].id,
        )
##########################################################################
def test_org_admin_002_POST_netixlan_no_net_contact(self):
network = SHARED["net_rw_ok"]
for poc in network.poc_set_active.all():
poc.delete()
data = self.make_data_netixlan(
net_id=SHARED["net_rw_ok"].id,
ixlan_id=SHARED["ixlan_rw_ok"].id,
asn=SHARED["net_rw_ok"].asn,
)
# When we create this netixlan it should fail with a
# non-field-error.
r_data = self.assert_create(
self.db_org_admin,
"netixlan",
data,
test_failures={"invalid": {"n/a": "n/a"}},
test_success=False,
)
# Undelete poc but blank email
poc = network.poc_set.first()
poc.status = "ok"
poc.email = ""
poc.visible = "Public"
poc.save()
network.refresh_from_db()
# Also fails with network contact that is
# missing an email
r_data = self.assert_create(
self.db_org_admin,
"netixlan",
data,
test_failures={"invalid": {"n/a": "n/a"}},
test_success=False,
)
##########################################################################
def test_org_admin_002_POST_PUT_netixlan_validation(self):
data = self.make_data_netixlan(
net_id=SHARED["net_rw_ok"].id, ixlan_id=SHARED["ixlan_rw_ok"].id
)
test_failures = [
# test failure if ip4 not in prefix
{"invalid": {"ipaddr4": self.get_ip4(SHARED["ixlan_r_ok"])}},
# test failure if ip6 not in prefix
{"invalid": {"ipaddr6": self.get_ip6(SHARED["ixlan_r_ok"])}},
# test failure if speed is below limit
{"invalid": {"speed": 1}},
# test failure if speed is above limit
{"invalid": {"speed": 1250000}},
# test failure if speed is None
{"invalid": {"speed": None}},
]
for test_failure in test_failures:
self.assert_create(
self.db_org_admin,
"netixlan",
data,
test_failures=test_failure,
test_success=False,
)
##########################################################################
    def test_org_admin_002_POST_PUT_DELETE_ixfac(self):
        """Full CRUD cycle for an ixfac (exchange <-> facility link)."""
        data = {"fac_id": SHARED["fac_rw2_ok"].id, "ix_id": SHARED["ix_rw2_ok"].id}
        r_data = self.assert_create(
            self.db_org_admin,
            "ixfac",
            data,
            test_failures={
                "invalid": {"ix_id": ""},
                "perms": {
                    # set exchange to one the user doesnt have perms to
                    "ix_id": SHARED["ix_r_ok"].id
                },
                "status": {
                    "fac_id": SHARED["fac_rw2_pending"].id,
                    "ix_id": SHARED["ix_rw2_pending"].id,
                },
            },
        )
        SHARED["ixfac_id"] = r_data.get("id")
        self.assert_update(
            self.db_org_admin,
            "ixfac",
            SHARED["ixfac_id"],
            {"fac_id": SHARED["fac_r2_ok"].id},
            test_failures={
                "invalid": {"fac_id": ""},
                "perms": {"ix_id": SHARED["ix_r_ok"].id},
            },
        )
        self.assert_delete(
            self.db_org_admin,
            "ixfac",
            test_success=SHARED["ixfac_id"],
            test_failure=SHARED["ixfac_r_ok"].id,
        )
##########################################################################
def test_org_admin_003_PUT_org(self):
self.assert_update(
self.db_org_admin,
"org",
SHARED["org_rw_ok"].id,
{"name": self.make_name("Test")},
test_failures={
"invalid": {"name": ""},
"perms": {"id": SHARED["org_r_ok"].id},
},
)
##########################################################################
def test_zz_org_admin_004_DELETE_org(self):
org = Organization.objects.create(name="Deletable org", status="ok")
org.admin_usergroup.user_set.add(self.user_org_admin)
self.assert_delete(
self.db_org_admin,
"org",
# can delete the org we just made
test_success=org.id,
# cant delete the org we don't have write perms to
test_failure=SHARED["org_r_ok"].id,
)
self.assert_delete(
self.db_org_admin,
"org",
# cant delete the org that we have write perms to
# but is not empty
test_failure=SHARED["org_rw_ok"].id,
)
##########################################################################
# GUEST TESTS
##########################################################################
def test_guest_001_GET_org(self):
self.assert_get_handleref(self.db_guest, "org", SHARED["org_r_ok"].id)
##########################################################################
def test_guest_001_GET_net(self):
data = self.assert_get_handleref(self.db_guest, "net", SHARED["net_r_ok"].id)
for poc in data.get("poc_set"):
self.assertEqual(poc["visible"], "Public")
##########################################################################
def test_guest_001_GET_ix(self):
self.assert_get_handleref(self.db_guest, "ix", SHARED["ix_r_ok"].id)
##########################################################################
def test_guest_001_GET_fac(self):
self.assert_get_handleref(self.db_guest, "fac", SHARED["fac_r_ok"].id)
##########################################################################
def test_guest_001_GET_poc_private(self):
self.assert_get_forbidden(self.db_guest, "poc", SHARED["poc_r_ok_private"].id)
##########################################################################
def test_guest_001_GET_poc_users(self):
self.assert_get_forbidden(self.db_guest, "poc", SHARED["poc_r_ok_users"].id)
##########################################################################
def test_guest_001_GET_poc_public(self):
self.assert_get_handleref(self.db_guest, "poc", SHARED["poc_r_ok_public"].id)
##########################################################################
def test_guest_001_GET_nefac(self):
self.assert_get_handleref(self.db_guest, "netfac", SHARED["netfac_r_ok"].id)
##########################################################################
def test_guest_001_GET_netixlan(self):
self.assert_get_handleref(self.db_guest, "netixlan", SHARED["netixlan_r_ok"].id)
##########################################################################
def test_guest_001_GET_ixfac(self):
self.assert_get_handleref(self.db_guest, "ixfac", SHARED["ixfac_r_ok"].id)
##########################################################################
def test_guest_001_GET_ixlan(self):
self.assert_get_handleref(self.db_guest, "ixlan", SHARED["ixlan_r_ok"].id)
##########################################################################
def test_guest_001_GET_ixlan_ixf_ixp_member_list_url(self):
for ixlan in self.db_guest.all(
"ixlan", ixf_ixp_member_list_url__startswith="http"
):
if ixlan["ixf_ixp_member_list_url_visible"] == "Public":
assert ixlan["ixf_ixp_member_list_url"] == "http://localhost"
else:
assert "ixf_ixp_member_list_url" not in ixlan
##########################################################################
def test_guest_001_GET_ixpfx(self):
self.assert_get_handleref(self.db_guest, "ixpfx", SHARED["ixpfx_r_ok"].id)
##########################################################################
def test_guest_001_GET_list_404(self):
for tag in REFTAG_MAP:
with pytest.raises(NotFoundException):
data = self.db_guest.all(tag, limit=1, id=99999999)
if tag == "net":
with pytest.raises(NotFoundException):
data = self.db_guest.all(tag, limit=1, asn=99999999999)
for tag in REFTAG_MAP:
if tag == "poc":
data = self.db_guest.all(tag, id=SHARED["poc_r_ok_public"].id)
else:
data = self.db_guest.all(tag, id=SHARED["%s_r_ok" % tag].id)
self.assertEqual(len(data), 1)
self.assert_handleref_integrity(data[0])
##########################################################################
def test_guest_005_list_all(self):
data = self.db_guest.all("org")
self.assertGreater(len(data), 1)
self.assert_handleref_integrity(data[0])
self.assert_data_integrity(data[0], "org")
##########################################################################
def test_guest_005_list_all_tags(self):
for tag in REFTAG_MAP:
if tag == "poc":
continue
data = self.db_guest.all(tag, limit=10)
self.assertLess(len(data), 11)
self.assert_handleref_integrity(data[0])
data = self.db_guest.all("poc", limit=10, visible="Public")
self.assertLess(len(data), 11)
self.assert_handleref_integrity(data[0])
##########################################################################
def test_org_admin_005_list(self):
for tag in REFTAG_MAP:
data = self.db_org_admin.all(tag, limit=10)
self.assertLess(len(data), 11)
self.assert_handleref_integrity(data[0])
for row in data:
self.assertEqual(row["status"], "ok")
##########################################################################
def test_guest_005_fields_filter(self):
data = self.db_guest.all("org", limit=10, fields=",".join(["name", "status"]))
self.assertGreater(len(data), 0)
for row in data:
self.assertEqual(sorted(row.keys()), sorted(["name", "status"]))
data = self.db_guest.get(
"org", Organization.objects.first().id, fields=",".join(["name", "status"])
)
self.assertGreater(len(data), 0)
self.assertEqual(sorted(data[0].keys()), sorted(["name", "status"]))
##########################################################################
def test_guest_005_ixlan_fields_filter(self):
"""
Tests the specific issue of #829 where a get to an ixlan
with fields parameter set would raise a 500 error for
unauthenticated users
"""
data = self.db_guest.get(
"ixlan", SHARED["ixlan_rw_ok"].id, fields="ixpfx_set", depth=2
)
assert len(data) == 1
row = data[0]
assert list(row.keys()) == ["ixpfx_set"]
##########################################################################
def test_guest_005_list_limit(self):
data = self.db_guest.all("org", limit=10)
self.assertEqual(len(data), 10)
self.assert_handleref_integrity(data[0])
self.assert_data_integrity(data[0], "org")
##########################################################################
    def test_guest_005_list_pagination(self):
        """skip/limit paginate org listings in stable id order."""
        # collect the ids of all active orgs in listing order
        org_ids = [org.id for org in Organization.objects.filter(status="ok")]
        # NOTE(review): range(0, 1) only exercises the first page (n=0),
        # and the inner indexing would be off for n > 0 - presumably more
        # pages were intended; confirm before extending
        for n in range(0, 1):
            data_a = self.db_guest.all("org", skip=n * 3, limit=3)
            for i in range(n, n + 3):
                assert data_a[i]["id"] == org_ids[i]
##########################################################################
def test_guest_005_list_since(self):
data = self.db_guest.all(
"net", since=int(START_TIMESTAMP) - 10, status="deleted"
)
self.assertEqual(len(data), 2)
self.assert_handleref_integrity(data[0])
self.assert_data_integrity(data[0], "net")
##########################################################################
    def test_guest_005_get_depth_all(self):
        """
        Test all end points single object GET with all valid depths
        This also asserts data structure integrity for objects expanded
        by the depth parameter
        """
        for depth in [0, 1, 2, 3, 4]:
            for tag, slz in list(REFTAG_MAP_SLZ.items()):
                note_tag = f"({tag} {depth})"
                # poc fixtures are keyed by visibility, unlike other tags
                if tag == "poc":
                    o = SHARED["%s_r_ok_public" % tag]
                else:
                    o = SHARED["%s_r_ok" % tag]
                data = self.db_guest.get(tag, o.id, depth=depth)
                self.assertEqual(len(data), 1, msg="Data length %s" % note_tag)
                # NOTE(review): pk_flds / n_flds are unused here; the call is
                # kept in case serializer_related_fields has side effects -
                # confirm and drop if it is pure
                pk_flds, n_flds = self.serializer_related_fields(slz)
                obj = data[0]
                self.assert_related_depth(
                    obj, slz, depth, depth, note_tag, typ="single"
                )
##########################################################################
    def test_guest_005_list_depth_all(self):
        """
        Tests all end points multiple object GET with all valid depths
        This also asserts data structure integrity for objects expanded
        by the depth parameter
        """
        for depth in [0, 1, 2, 3]:
            for tag, slz in list(REFTAG_MAP_SLZ.items()):
                note_tag = f"({tag} {depth})"
                # poc fixtures are keyed by visibility, unlike other tags
                if tag == "poc":
                    o = SHARED["%s_r_ok_public" % tag]
                else:
                    o = SHARED["%s_r_ok" % tag]
                data = self.db_guest.all(tag, id=o.id, depth=depth)
                self.assertEqual(len(data), 1, msg="Data length %s" % note_tag)
                # NOTE(review): pk_flds / n_flds are unused here; the call is
                # kept in case serializer_related_fields has side effects -
                # confirm and drop if it is pure
                pk_flds, n_flds = self.serializer_related_fields(slz)
                obj = data[0]
                self.assert_related_depth(
                    obj, slz, depth, depth, note_tag, typ="listing"
                )
##########################################################################
def test_guest_005_list_depth_not_set(self):
data = self.db_guest.all("org", id=SHARED["org_r_ok"].id)
self.assertEqual(data[0].get("net_set"), None)
##########################################################################
def test_guest_005_list_depth_0(self):
data = self.db_guest.all("org", id=SHARED["org_r_ok"].id, depth=0)
self.assertEqual(data[0].get("net_set"), None)
##########################################################################
def test_guest_005_list_depth_1(self):
data = self.db_guest.all("org", id=SHARED["org_r_ok"].id, depth=1)
self.assertEqual(len(data[0].get("net_set")), 3)
self.assertEqual(data[0].get("net_set")[0], SHARED["net_r_ok"].id)
self.assertEqual(data[0].get("net_set")[1], SHARED["net_r2_ok"].id)
self.assertEqual(data[0].get("net_set")[2], SHARED["net_r3_ok"].id)
#############################################################################
def test_guest_005_list_depth_2(self):
data = self.db_guest.all("org", id=SHARED["org_r_ok"].id, depth=2)
self.assertEqual(len(data[0].get("net_set")), 3)
obj = data[0].get("net_set")[0]
self.assertEqual(obj.get("id"), SHARED["net_r_ok"].id)
self.assert_data_integrity(obj, "net", ignore=["org_id"])
#############################################################################
def test_guest_005_list_depth_3(self):
data = self.db_guest.all("org", id=SHARED["org_r_ok"].id, depth=3)
self.assertEqual(len(data[0].get("net_set")), 3)
obj = data[0].get("net_set")[0]
self.assertEqual(obj.get("id"), SHARED["net_r_ok"].id)
self.assert_data_integrity(obj, "net", ignore=["org_id"])
obj = obj.get("netfac_set")
self.assertEqual(len(obj), 1)
self.assertEqual(obj[0], SHARED["netfac_r_ok"].id)
##########################################################################
    def test_guest_005_list_filter_dates_numeric(self):
        """Numeric comparison filters (gt/gte/lt/lte/exact) work against
        the created and updated date fields on fac listings."""
        for flt, ass in list(NUMERIC_TESTS.items()):
            for fld in ["created", "updated"]:
                # pick a reference date that should match for this operator
                if flt in ["gt", "gte"]:
                    DATE = DATES["yesterday"]
                elif flt in ["lt"]:
                    DATE = DATES["tomorrow"]
                else:
                    DATE = DATES["today"]
                # empty flt means an exact-match filter without a suffix
                if flt:
                    kwargs = {f"{fld}__{flt}": DATE[1]}
                else:
                    kwargs = {fld: DATE[1]}
                data = self.db_guest.all("fac", limit=10, **kwargs)
                self.assertGreater(
                    len(data), 0, msg=f"{fld}_{flt} - data length assertion"
                )
                for row in data:
                    self.assert_data_integrity(row, "fac")
                    # timestamps may come back with or without microseconds
                    try:
                        dt = datetime.datetime.strptime(
                            row[fld], "%Y-%m-%dT%H:%M:%SZ"
                        ).date()
                    except ValueError:
                        dt = datetime.datetime.strptime(
                            row[fld], "%Y-%m-%dT%H:%M:%S.%fZ"
                        ).date()
                    # dispatch to the matching unittest assertion
                    # (e.g. assertGreater for "Greater")
                    fnc = getattr(self, "assert%s" % ass)
                    fnc(
                        dt,
                        DATE[0],
                        msg="{}__{}: {}, {}".format(fld, flt, row[fld], DATE[1]),
                    )
##########################################################################
def test_guest_005_list_filter_numeric(self):
data = self.db_guest.all("net", asn=SHARED["net_r_ok"].asn)
self.assertEqual(len(data), 1)
self.assert_data_integrity(data[0], "net")
self.assertEqual(data[0]["asn"], SHARED["net_r_ok"].asn)
##########################################################################
def test_guest_005_list_filter_numeric_lte(self):
data = self.db_guest.all("fac", id__lte=SHARED["fac_rw_ok"].id)
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "fac")
for fac in data:
self.assertLessEqual(int(fac["id"]), SHARED["fac_rw_ok"].id)
##########################################################################
def test_guest_005_list_filter_numeric_lt(self):
data = self.db_guest.all("fac", id__lt=SHARED["fac_rw_ok"].id)
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "fac")
for fac in data:
self.assertLess(int(fac["id"]), SHARED["fac_rw_ok"].id)
##########################################################################
def test_guest_005_list_filter_numeric_gte(self):
data = self.db_guest.all("fac", id__gte=SHARED["fac_r_ok"].id)
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "fac")
for fac in data:
self.assertGreaterEqual(int(fac["id"]), SHARED["fac_r_ok"].id)
##########################################################################
def test_guest_005_list_filter_numeric_gt(self):
data = self.db_guest.all("fac", id__gt=SHARED["fac_r_ok"].id)
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "fac")
for fac in data:
self.assertGreater(int(fac["id"]), SHARED["fac_r_ok"].id)
##########################################################################
def test_guest_005_list_filter_numeric_in(self):
ids = [SHARED["fac_r_ok"].id, SHARED["fac_rw_ok"].id]
data = self.db_guest.all("fac", id__in="%s,%s" % tuple(ids))
self.assertEqual(len(data), len(ids))
self.assert_data_integrity(data[0], "fac")
for fac in data:
self.assertIn(int(fac["id"]), ids)
##########################################################################
def test_guest_005_list_filter_string(self):
data = self.db_guest.all("ix", name=SHARED["ix_r_ok"].name)
self.assertEqual(len(data), 1)
self.assert_data_integrity(data[0], "ix")
self.assertEqual(data[0]["name"], SHARED["ix_r_ok"].name)
##########################################################################
def test_guest_005_list_filter_string_contains(self):
token = SHARED["ix_r_ok"].name[3:5]
data = self.db_guest.all("ix", name__contains=token.lower())
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "ix")
for ix in data:
self.assertIn(token, ix["name"])
##########################################################################
def test_guest_005_list_filter_string_startswith(self):
token = SHARED["ix_r_ok"].name[0:5]
data = self.db_guest.all("ix", name__startswith=token.lower())
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "ix")
for ix in data:
self.assertEqual(ix["name"][:5], token)
##########################################################################
def test_guest_005_list_filter_string_in(self):
cities = ["API Test:IX:RW:ok", "API Test:IX:R:ok"]
data = self.db_guest.all("ix", name__in="%s,%s" % tuple(cities))
self.assertGreater(len(data), 0)
self.assert_data_integrity(data[0], "ix")
for ix in data:
self.assertIn(ix["name"], cities)
##########################################################################
def test_guest_005_list_filter_relation_basic(self):
data = self.db_guest.all("ix", org_id=SHARED["ix_r_ok"].org_id)
self.assertEqual(len(data), 3)
self.assert_data_integrity(data[0], "ix")
self.assertEqual(data[0]["org_id"], SHARED["ix_r_ok"].org_id)
##########################################################################
def test_guest_005_list_filter_relation_basic_2(self):
data = self.db_guest.all("ix", org=SHARED["ix_r_ok"].org_id)
self.assertEqual(len(data), 3)
self.assert_data_integrity(data[0], "ix")
self.assertEqual(data[0]["org_id"], SHARED["ix_r_ok"].org_id)
##########################################################################
def test_guest_005_list_filter_relation_fld_xl(self):
data = self.db_guest.all("netixlan", net_id__lt=4)
for row in data:
self.assertLess(row["net_id"], 4)
##########################################################################
def test_guest_005_list_filter_relation_nested(self):
data = self.db_user.all("poc", net__asn=SHARED["net_r_ok"].asn)
self.assertEqual(len(data), 2)
for row in data:
self.assertEqual(row.get("net_id"), SHARED["net_r_ok"].id)
##########################################################################
def test_guest_005_list_poc(self):
data = self.db_guest.all("poc", limit=100)
for row in data:
self.assertEqual(row.get("visible"), "Public")
data = self.db_guest.all("poc", visible__in="Private,Users", limit=100)
self.assertEqual(0, len(data))
##########################################################################
def test_guest_005_list_filter_net_related(self):
self.assert_list_filter_related("net", "ix")
self.assert_list_filter_related("net", "ixlan")
self.assert_list_filter_related("net", "netixlan")
self.assert_list_filter_related("net", "netfac")
self.assert_list_filter_related("net", "fac")
self.assert_list_filter_related("net", "org")
##########################################################################
def test_guest_005_list_filter_net_not_ix(self):
ix = SHARED["ix_r_ok"]
data_a = self.db_guest.all("net", ix=ix.id)
data_b = self.db_guest.all("net", not_ix=ix.id)
self.assertGreater(len(data_a), 0)
self.assertGreater(len(data_b), 0)
for row_b in data_b:
for row_a in data_a:
self.assertNotEqual(row_a["id"], row_b["id"])
##########################################################################
def test_guest_005_list_filter_net_not_fac(self):
fac = SHARED["fac_r_ok"]
data_a = self.db_guest.all("net", fac=fac.id)
data_b = self.db_guest.all("net", not_fac=fac.id)
self.assertGreater(len(data_a), 0)
self.assertGreater(len(data_b), 0)
for row_b in data_b:
for row_a in data_a:
self.assertNotEqual(row_a["id"], row_b["id"])
##########################################################################
def test_guest_005_list_filter_ixpfx_related(self):
self.assert_list_filter_related("ixpfx", "ix")
self.assert_list_filter_related("ixpfx", "ixlan")
##########################################################################
def test_guest_005_list_filter_ixpfx_whereis(self):
ixpfx = SHARED["ixpfx_r_ok"]
ipaddr = "{}".format(ixpfx.prefix[0])
data = self.db_guest.all("ixpfx", whereis=ipaddr)
assert len(data) == 1
assert data[0]["id"] == ixpfx.id
##########################################################################
def test_guest_005_list_filter_ix_related(self):
self.assert_list_filter_related("ix", "ixlan")
self.assert_list_filter_related("ix", "ixfac")
self.assert_list_filter_related("ix", "fac")
self.assert_list_filter_related("ix", "net")
self.assert_list_filter_related("ix", "net", "asn")
self.assert_list_filter_related("ix", "org")
##########################################################################
def test_guest_005_list_filter_ix_ipblock(self):
prefix = str(SHARED["ixpfx_r_ok"].prefix)[:-3]
data = self.db_guest.all("ix", ipblock=prefix)
self.assertGreater(len(data), 0)
for row in data:
self.assertEqual(row["id"], SHARED["ix_r_ok"].id)
##########################################################################
def test_guest_005_list_filter_ix_name_search(self):
data = self.db_guest.all("ix", name_search=SHARED["ix_r_ok"].name)
self.assertEqual(len(data), 1)
for row in data:
self.assertEqual(row["id"], SHARED["ix_r_ok"].id)
data = self.db_guest.all("ix", name_search=SHARED["ix_r_ok"].name_long)
self.assertEqual(len(data), 1)
for row in data:
self.assertEqual(row["id"], SHARED["ix_r_ok"].id)
##########################################################################
    def test_guest_005_list_filter_ix_asn_overlap(self):
        """
        `asn_overlap` on ix returns only exchanges where every passed ASN
        is present, and errors when given a single asn or too many asns.
        Creates its own fixture networks/exchanges and hard-deletes them after.
        """
        # create three test networks
        networks = [
            Network.objects.create(status="ok", **self.make_data_net())
            for i in range(0, 3)
        ]
        # create two test exchanges
        exchanges = [
            InternetExchange.objects.create(status="ok", **self.make_data_ix())
            for i in range(0, 2)
        ]
        # collect ixlans
        ixlans = [ix.ixlan for ix in exchanges]
        # all three networks peer at first exchange
        for net in networks:
            NetworkIXLan.objects.create(
                network=net, ixlan=ixlans[0], status="ok", asn=net.asn, speed=0
            )
        # only the first two networks peer at second exchange
        for net in networks[:2]:
            NetworkIXLan.objects.create(
                network=net, ixlan=ixlans[1], status="ok", asn=net.asn, speed=0
            )
        # do test queries
        # query #1 - test overlapping exchanges for all 3 asns - should return first ix
        data = self.db_guest.all(
            "ix", asn_overlap=",".join([str(net.asn) for net in networks])
        )
        self.assertEqual(len(data), 1)
        self.assertEqual(data[0]["id"], exchanges[0].id)
        # query #2 - test overlapping exchanges for first 2 asns - should return both ixs
        data = self.db_guest.all(
            "ix", asn_overlap=",".join([str(net.asn) for net in networks[:2]])
        )
        self.assertEqual(len(data), 2)
        for row in data:
            self.assertIn(row["id"], [ix.id for ix in exchanges])
        # query #3 - should error when only passing one asn
        with pytest.raises(InvalidRequestException):
            self.db_guest.all("ix", asn_overlap=networks[0].asn)
        # query #4 - should error when passing too many asns
        with pytest.raises(InvalidRequestException):
            self.db_guest.all(
                "ix", asn_overlap=",".join([str(i) for i in range(0, 30)])
            )
        # clean up data
        for net in networks:
            net.delete(hard=True)
        for ix in exchanges:
            ix.delete(hard=True)
##########################################################################
def test_guest_005_list_filter_fac_related(self):
self.assert_list_filter_related("fac", "ix")
self.assert_list_filter_related("fac", "net")
##########################################################################
def test_guest_005_list_filter_fac_org_name(self):
data = self.db_guest.all("fac", org_name=SHARED["org_r_ok"].name[2:10])
for row in data:
self.assertEqual(data[0]["org_id"], SHARED["org_r_ok"].id)
self.assert_data_integrity(data[0], "fac")
##########################################################################
    def test_guest_005_list_filter_fac_net_count(self):
        """`net_count` on fac supports exact matches and lt/gt modifiers."""
        # exact match: exactly one peering network
        data = self.db_guest.all("fac", net_count=1)
        for row in data:
            self.assert_data_integrity(row, "fac")
            self.assertEqual(row["net_count"], 1)
        # exact match: no peering networks
        data = self.db_guest.all("fac", net_count=0)
        for row in data:
            self.assert_data_integrity(row, "fac")
            self.assertEqual(row["net_count"], 0)
        # lt 1 is equivalent to exactly 0
        data = self.db_guest.all("fac", net_count__lt=1)
        for row in data:
            self.assert_data_integrity(row, "fac")
            self.assertEqual(row["net_count"], 0)
        # gt 0: at least one peering network
        data = self.db_guest.all("fac", net_count__gt=0)
        for row in data:
            self.assert_data_integrity(row, "fac")
            self.assertGreater(row["net_count"], 0)
##########################################################################
    def test_guest_005_list_filter_ix_net_count(self):
        """`net_count` on ix supports exact matches and lt/lte/gt/gte modifiers."""
        # exact match: exactly one peering network
        data = self.db_guest.all("ix", net_count=1)
        for row in data:
            self.assert_data_integrity(row, "ix")
            self.assertEqual(row["net_count"], 1)
        # exact match: no peering networks
        data = self.db_guest.all("ix", net_count=0)
        for row in data:
            self.assert_data_integrity(row, "ix")
            self.assertEqual(row["net_count"], 0)
        # lt 1 is equivalent to exactly 0
        data = self.db_guest.all("ix", net_count__lt=1)
        for row in data:
            self.assert_data_integrity(row, "ix")
            self.assertEqual(row["net_count"], 0)
        # gt 0: at least one peering network
        data = self.db_guest.all("ix", net_count__gt=0)
        for row in data:
            self.assert_data_integrity(row, "ix")
            self.assertGreater(row["net_count"], 0)
        # lte / gte bounds
        data = self.db_guest.all("ix", net_count__lte=2)
        for row in data:
            self.assert_data_integrity(row, "ix")
            self.assertLessEqual(row["net_count"], 2)
        data = self.db_guest.all("ix", net_count__gte=1)
        for row in data:
            self.assert_data_integrity(row, "ix")
            self.assertGreaterEqual(row["net_count"], 1)
##########################################################################
    def test_guest_005_list_filter_fac_asn_overlap(self):
        """
        `asn_overlap` on fac returns only facilities where every passed ASN
        is present, and errors when given a single asn or too many asns.
        Creates its own fixture networks/facilities and hard-deletes them after.
        """
        # create three test networks
        networks = [
            Network.objects.create(status="ok", **self.make_data_net())
            for i in range(0, 3)
        ]
        # create two test facilities
        facilities = [
            Facility.objects.create(status="ok", **self.make_data_fac())
            for i in range(0, 2)
        ]
        # all three networks peer at first facility
        for net in networks:
            NetworkFacility.objects.create(
                network=net, facility=facilities[0], status="ok"
            )
        # only the first two networks peer at second facility
        for net in networks[:2]:
            NetworkFacility.objects.create(
                network=net, facility=facilities[1], status="ok"
            )
        # do test queries
        # query #1 - test overlapping facilities for all 3 asns - should return first facility
        data = self.db_guest.all(
            "fac", asn_overlap=",".join([str(net.asn) for net in networks])
        )
        self.assertEqual(len(data), 1)
        self.assertEqual(data[0]["id"], facilities[0].id)
        # query #2 - test overlapping facilities for first 2 asns - should return both facs
        data = self.db_guest.all(
            "fac", asn_overlap=",".join([str(net.asn) for net in networks[:2]])
        )
        self.assertEqual(len(data), 2)
        for row in data:
            # NOTE(review): loop var is named `ix` but iterates facilities here
            self.assertIn(row["id"], [ix.id for ix in facilities])
        # query #3 - should error when only passing one asn
        with pytest.raises(InvalidRequestException):
            self.db_guest.all("fac", asn_overlap=networks[0].asn)
        # query #4 - should error when passing too many asns
        with pytest.raises(InvalidRequestException):
            self.db_guest.all(
                "fac", asn_overlap=",".join([str(i) for i in range(0, 30)])
            )
        # clean up data
        for net in networks:
            net.delete(hard=True)
        for fac in facilities:
            fac.delete(hard=True)
##########################################################################
def test_guest_005_list_filter_org_asn(self):
data = self.db_guest.all("org", asn=SHARED["net_r_ok"].asn)
self.assertEqual(len(data), 1)
for row in data:
self.assertEqual(row["id"], SHARED["org_r_ok"].id)
##########################################################################
def test_guest_005_list_filter_netixlan_related(self):
self.assert_list_filter_related("netixlan", "net")
self.assert_list_filter_related("netixlan", "ixlan")
self.assert_list_filter_related("netixlan", "ix")
##########################################################################
def test_guest_005_list_filter_netixlan_operational(self):
# all netixlans are operational at this point,
# filtering by operational=False should return empty list
data = self.db_guest.all("netixlan", operational=0)
assert len(data) == 0
# set one netixlan to not operational
netixlan = NetworkIXLan.objects.first()
netixlan.operational = False
netixlan.save()
# assert that it is now returned in the operational=False
# result
data = self.db_guest.all("netixlan", operational=0)
assert len(data) == 1
assert data[0]["id"] == netixlan.id
##########################################################################
def test_guest_005_list_filter_netixlan_related_name(self):
data = self.db_guest.all("netixlan", name=SHARED["ix_rw_ok"].name)
self.assertEqual(len(data), 1)
self.assert_data_integrity(data[0], "netixlan")
##########################################################################
def test_guest_005_list_filter_netfac_related(self):
self.assert_list_filter_related("netfac", "net")
self.assert_list_filter_related("netfac", "fac")
##########################################################################
def test_guest_005_list_filter_netfac_related_name(self):
data = self.db_guest.all("netfac", name=SHARED["fac_rw_ok"].name)
self.assertEqual(len(data), 1)
self.assert_data_integrity(data[0], "netfac")
##########################################################################
def test_guest_005_list_filter_netfac_related_city(self):
data = self.db_guest.all("netfac", city=SHARED["fac_rw_ok"].city)
self.assertEqual(len(data), 2)
self.assert_data_integrity(data[0], "netfac")
##########################################################################
def test_guest_005_list_filter_netfac_related_country(self):
data = self.db_guest.all(
"netfac", country="{}".format(SHARED["fac_rw_ok"].country)
)
self.assertEqual(len(data), 2)
self.assert_data_integrity(data[0], "netfac")
##########################################################################
    def test_guest_005_list_filter_ixlan_related(self):
        """ixlan supports the related ix filter."""
        self.assert_list_filter_related("ixlan", "ix")
##########################################################################
def test_guest_005_list_filter_ixfac_related(self):
self.assert_list_filter_related("ixfac", "fac")
self.assert_list_filter_related("ixfac", "ix")
##########################################################################
def test_guest_005_list_filter_poc_related(self):
self.assert_list_filter_related("poc", "net")
return
data = self.db_guest.all("poc", net_id=SHARED["net_r_ok"].id)
self.assertGreater(len(data), 0)
for row in data:
self.assert_data_integrity(row, "poc")
self.assertEqual(row["net_id"], SHARED["net_r_ok"].id)
##########################################################################
def test_guest_005_list_skip(self):
data = self.db_guest.all("org", skip=0, limit=20)
self.assertEqual(len(data), 20)
target = data[10]
data = self.db_guest.all("org", skip=10, limit=20)
self.assertEqual(len(data), 20)
comp = data[0]
self.assertEqual(target, comp)
##########################################################################
def test_guest_005_list_filter_accented(self):
"""
test filtering with accented search terms
"""
# TODO: sqlite3 is being used as the testing backend, and django 1.11
# seems to be unable to set a collation on it, so we can't properly test
# the other way atm, for now this test at least confirms that the term is
# unaccented correctly.
#
# on production we run mysql with flattened accents so both ways should work
# there regardless.
org = Organization.objects.create(name="org unaccented", status="ok")
net = Network.objects.create(
asn=12345, name="net unaccented", status="ok", org=org
)
ix = InternetExchange.objects.create(org=org, name="ix unaccented", status="ok")
fac = Facility.objects.create(org=org, name="fac unaccented", status="ok")
for tag in ["org", "net", "ix", "fac"]:
data = self.db_guest.all(tag, name=f"{tag} unãccented")
self.assertEqual(len(data), 1)
##########################################################################
# READONLY PERMISSION TESTS
# These tests assert that the readonly users cannot write anything
##########################################################################
##########################################################################
def test_readonly_users_003_PUT_org(self):
for db in self.readonly_dbs():
self.assert_update(
db,
"org",
SHARED["org_r_ok"].id,
{},
test_success=False,
test_failures={"perms": {}},
)
##########################################################################
def test_readonly_users_002_POST_ix(self):
for db in self.readonly_dbs():
self.assert_create(
db,
"ix",
self.make_data_ix(prefix=self.get_prefix4()),
test_failures={"perms": {}},
test_success=False,
)
##########################################################################
def test_readonly_users_003_PUT_ix(self):
for db in self.readonly_dbs():
self.assert_update(
db,
"ix",
SHARED["ix_r_ok"].id,
{},
test_success=False,
test_failures={"perms": {}},
)
##########################################################################
def test_readonly_users_004_DELETE_ix(self):
for db in self.readonly_dbs():
self.assert_delete(
db, "ix", test_success=False, test_failure=SHARED["ix_r_ok"].id
)
##########################################################################
def test_readonly_users_002_POST_fac(self):
for db in self.readonly_dbs():
self.assert_create(
db,
"fac",
self.make_data_fac(),
test_failures={"perms": {}},
test_success=False,
)
##########################################################################
def test_readonly_users_003_PUT_fac(self):
for db in self.readonly_dbs():
self.assert_update(
db,
"fac",
SHARED["fac_r_ok"].id,
{},
test_success=False,
test_failures={"perms": {}},
)
##########################################################################
def test_readonly_users_004_DELETE_fac(self):
for db in self.readonly_dbs():
self.assert_delete(
db, "fac", test_success=False, test_failure=SHARED["fac_r_ok"].id
)
##########################################################################
def test_readonly_users_002_POST_netfac(self):
for db in self.readonly_dbs():
self.assert_create(
db,
"netfac",
{
"net_id": SHARED["net_r_ok"].id,
"fac_id": SHARED["fac_r2_ok"].id,
},
test_failures={"perms": {}},
test_success=False,
)
##########################################################################
def test_readonly_users_003_PUT_netfac(self):
for db in self.readonly_dbs():
self.assert_update(
db,
"netfac",
SHARED["netfac_r_ok"].id,
{},
test_success=False,
test_failures={"perms": {}},
)
##########################################################################
def test_readonly_users_004_DELETE_netfac(self):
for db in self.readonly_dbs():
self.assert_delete(
db, "netfac", test_success=False, test_failure=SHARED["netfac_r_ok"].id
)
##########################################################################
def test_readonly_users_002_POST_ixfac(self):
for db in self.readonly_dbs():
self.assert_create(
db,
"ixfac",
{"ix_id": SHARED["ix_r_ok"].id, "fac_id": SHARED["fac_r2_ok"].id},
test_failures={"perms": {}},
test_success=False,
)
##########################################################################
def test_readonly_users_003_PUT_ixfac(self):
for db in self.readonly_dbs():
self.assert_update(
db,
"ixfac",
SHARED["ixfac_r_ok"].id,
{},
test_success=False,
test_failures={"perms": {}},
)
##########################################################################
def test_readonly_users_004_DELETE_ixfac(self):
for db in self.readonly_dbs():
self.assert_delete(
db, "ixfac", test_success=False, test_failure=SHARED["ixfac_r_ok"].id
)
##########################################################################
def test_readonly_users_002_POST_poc(self):
for db in self.readonly_dbs():
self.assert_create(
db,
"poc",
self.make_data_poc(net_id=SHARED["net_rw_ok"].id),
test_failures={"perms": {}},
test_success=False,
)
##########################################################################
def test_readonly_users_003_PUT_poc(self):
for db in self.readonly_dbs(exclude=[self.db_user]):
self.assert_update(
db,
"poc",
SHARED["poc_r_ok_public"].id,
{},
test_success=False,
test_failures={"perms": {}},
)
self.assert_update(
db,
"poc",
SHARED["poc_r_ok_private"].id,
{},
test_success=False,
test_failures={"perms": {}},
)
self.assert_update(
db,
"poc",
SHARED["poc_r_ok_users"].id,
{},
test_success=False,
test_failures={"perms": {}},
)
##########################################################################
def test_readonly_users_004_DELETE_poc(self):
for db in self.readonly_dbs():
self.assert_delete(
db, "poc", test_success=False, test_failure=SHARED["poc_r_ok_public"].id
)
self.assert_delete(
db,
"poc",
test_success=False,
test_failure=SHARED["poc_r_ok_private"].id,
)
self.assert_delete(
db, "poc", test_success=False, test_failure=SHARED["poc_r_ok_users"].id
)
##########################################################################
def test_readonly_users_002_POST_ixlan(self):
for db in self.readonly_dbs():
with self.assertRaises(Exception) as exc:
self.assert_create(
db,
"ixlan",
self.make_data_ixlan(),
test_failures={"perms": {}},
test_success=False,
)
self.assertIn('Method "POST" not allowed', str(exc.exception))
##########################################################################
def test_readonly_users_003_PUT_ixlan(self):
for db in self.readonly_dbs():
self.assert_update(
db,
"ixlan",
SHARED["ixlan_r_ok"].id,
{},
test_success=False,
test_failures={"perms": {}},
)
##########################################################################
def test_readonly_users_004_DELETE_ixlan(self):
for db in self.readonly_dbs():
with self.assertRaises(Exception) as exc:
self.assert_delete(
db,
"ixlan",
test_success=False,
test_failure=SHARED["ixlan_r_ok"].id,
)
self.assertIn('Method "DELETE" not allowed', str(exc.exception))
##########################################################################
def test_readonly_users_002_POST_ixpfx(self):
for db in self.readonly_dbs():
self.assert_create(
db,
"ixpfx",
self.make_data_ixpfx(prefix="200.100.200.0/22"),
test_failures={"perms": {}},
test_success=False,
)
##########################################################################
def test_readonly_users_003_PUT_ixpfx(self):
for db in self.readonly_dbs():
self.assert_update(
db,
"ixpfx",
SHARED["ixpfx_r_ok"].id,
{},
test_success=False,
test_failures={"perms": {}},
)
##########################################################################
def test_readonly_users_004_DELETE_ixpfx(self):
for db in self.readonly_dbs():
self.assert_delete(
db, "ixpfx", test_success=False, test_failure=SHARED["ixpfx_r_ok"].id
)
##########################################################################
def test_readonly_users_002_POST_netixlan(self):
for db in self.readonly_dbs():
self.assert_create(
db,
"netixlan",
self.make_data_netixlan(),
test_failures={"perms": {}},
test_success=False,
)
##########################################################################
def test_readonly_users_003_PUT_netixlan(self):
for db in self.readonly_dbs():
self.assert_update(
db,
"netixlan",
SHARED["netixlan_r_ok"].id,
{},
test_success=False,
test_failures={"perms": {}},
)
##########################################################################
def test_readonly_users_004_DELETE_netixlan(self):
for db in self.readonly_dbs():
self.assert_delete(
db,
"netixlan",
test_success=False,
test_failure=SHARED["netixlan_r_ok"].id,
)
##########################################################################
def test_readonly_users_004_DELETE_org(self):
for db in self.readonly_dbs():
self.assert_delete(
db, "org", test_success=False, test_failure=SHARED["org_r_ok"].id
)
##########################################################################
# CRUD PERMISSION TESTS
##########################################################################
def test_z_crud_002_create(self):
# user with create perms should be allowed to create a new poc under net_rw3_ok
# but not under net_rw2_ok
self.assert_create(
self.db_crud_create,
"poc",
self.make_data_poc(net_id=SHARED["net_rw3_ok"].id),
test_failures={"perms": {"net_id": SHARED["net_rw2_ok"].id}},
)
# other crud test users should not be able to create a new poc under
# net_rw3_ok
for p in ["delete", "update"]:
self.assert_create(
getattr(self, "db_crud_%s" % p),
"poc",
self.make_data_poc(net_id=SHARED["net_rw3_ok"].id),
test_failures={"perms": {}},
test_success=False,
)
def test_z_crud_003_update(self):
# user with update perms should be allowed to update net_rw3_ok
# but not net_rw2_ok
self.assert_update(
self.db_crud_update,
"net",
SHARED["net_rw3_ok"].id,
{"name": self.make_name("Test")},
test_failures={"perms": {"id": SHARED["net_rw2_ok"].id}},
)
# user with update perms should not be allowed to update ix_rw3_ok
self.assert_update(
self.db_crud_update,
"ix",
SHARED["ix_rw3_ok"].id,
{"name": self.make_name("Test")},
test_failures={"perms": {}},
test_success=False,
)
# other crud test users should not be able to update net_rw3_ok
for p in ["delete", "create"]:
self.assert_update(
getattr(self, "db_crud_%s" % p),
"net",
SHARED["net_rw3_ok"].id,
{"name": self.make_name("Test")},
test_failures={"perms": {}},
test_success=False,
)
def test_z_crud_004_delete(self):
# other crud test users should not be able to delete net_rw3_ok
for p in ["update", "create"]:
self.assert_delete(
getattr(self, "db_crud_%s" % p),
"net",
test_success=False,
test_failure=SHARED["net_rw3_ok"].id,
)
# user with delete perms should be allowed to update net_rw3_ok
# but not net_rw2_ok
self.assert_delete(
self.db_crud_delete,
"net",
SHARED["net_rw3_ok"].id,
test_failure=SHARED["net_rw2_ok"].id,
)
# user with delete perms should not be allowed to delete ix_rw3_ok
self.assert_delete(
self.db_crud_delete,
"ix",
test_success=False,
test_failure=SHARED["ix_rw3_ok"].id,
)
##########################################################################
# MISC TESTS
##########################################################################
def _test_GET_ixf_ixp_member_list_url(self, db, tests=[], suffix="r"):
ixlan = SHARED[f"ixlan_{suffix}_ok"]
ixlan.ixf_ixp_member_list_url = "http://localhost"
ixlan.save()
for visible, expected in tests:
ixlan.ixf_ixp_member_list_url_visible = visible
ixlan.full_clean()
ixlan.save()
data = db.get("ixlan", id=ixlan.id)[0]
assert data["ixf_ixp_member_list_url_visible"] == visible
if expected:
assert data["ixf_ixp_member_list_url"] == ixlan.ixf_ixp_member_list_url
else:
assert "ixf_ixp_member_list_url" not in data
def test_z_misc_GET_ixf_ixp_member_list_url(self):
"""
Test the visibility of ixlan.ixf_ixp_member_list_url for
Guest, User, Org member and org admin
"""
self._test_GET_ixf_ixp_member_list_url(
self.db_user, [("Private", False), ("Users", True), ("Public", True)]
)
self._test_GET_ixf_ixp_member_list_url(
self.db_guest, [("Private", False), ("Users", False), ("Public", True)]
)
self._test_GET_ixf_ixp_member_list_url(
self.db_org_member, [("Private", True), ("Users", True), ("Public", True)]
)
self._test_GET_ixf_ixp_member_list_url(
self.db_org_admin,
[("Private", True), ("Users", True), ("Public", True)],
suffix="rw",
)
def test_z_misc_POST_ix_fac_missing_phone_fields(self):
"""
Test that omitting the *_phone fields during fac
and ix object creation doesnt error 500
TODO: a test that drops all the non-required fields
and tests for every reftag model
"""
data = self.make_data_fac()
db = self.db_org_admin
del data["tech_phone"]
r = db.create("fac", data, return_response=True).get("data")
data = self.make_data_fac()
del data["sales_phone"]
r = db.create("fac", data, return_response=True).get("data")
data = self.make_data_ix(prefix=self.get_prefix4())
del data["tech_phone"]
r = db.create("ix", data, return_response=True).get("data")
data = self.make_data_ix(prefix=self.get_prefix4())
del data["policy_phone"]
r = db.create("ix", data, return_response=True).get("data")
def test_z_misc_002_dupe_netixlan_ip(self):
# test that addint duplicate netixlan ips is impossible
A = SHARED["netixlan_rw_ok"]
self.assert_create(
self.db_org_admin,
"netixlan",
self.make_data_netixlan(ixlan_id=A.ixlan_id, net_id=A.network_id),
test_success=False,
test_failures={"invalid": {"ipaddr4": str(A.ipaddr4)}},
)
self.assert_create(
self.db_org_admin,
"netixlan",
self.make_data_netixlan(
ixlan_id=A.ixlan_id,
net_id=A.network_id,
),
test_success=False,
test_failures={"invalid": {"ipaddr6": str(A.ipaddr6)}},
)
    def test_z_misc_002_local_asn(self):
        """local_asn / asn on netfac and netixlan is always derived from the
        network; any client-sent value is ignored (#186)."""
        # test that local_asn gets enforced (#186)
        net = SHARED["net_rw_ok"]
        fac = SHARED["fac_rw_ok"]
        ixlan = SHARED["ixlan_rw_ok"]
        # test netfac create without asn sent (should auto set)
        data = {"net_id": net.id, "fac_id": fac.id}
        r_data = self.db_org_admin.create("netfac", data, return_response=True).get(
            "data"
        )[0]
        assert r_data["local_asn"] == net.asn
        NetworkFacility.objects.get(id=r_data["id"]).delete()
        # test netfac create with local_asn sent (should ignore and auto set)
        data = {"net_id": net.id, "fac_id": fac.id, "local_asn": 12345}
        r_data = self.db_org_admin.create("netfac", data, return_response=True).get(
            "data"
        )[0]
        assert r_data["local_asn"] == net.asn
        NetworkFacility.objects.get(id=r_data["id"]).delete()
        # test netixlan create without asn sent (should auto set)
        data = self.make_data_netixlan(ixlan_id=ixlan.id, net_id=net.id)
        del data["asn"]
        r_data = self.db_org_admin.create("netixlan", data, return_response=True).get(
            "data"
        )[0]
        assert r_data["asn"] == net.asn
        NetworkIXLan.objects.get(id=r_data["id"]).delete()
        # test netixlan create with asn sent (should ignore and auto set)
        data = self.make_data_netixlan(ixlan_id=ixlan.id, net_id=net.id, asn=12345)
        r_data = self.db_org_admin.create("netixlan", data, return_response=True).get(
            "data"
        )[0]
        assert r_data["asn"] == net.asn
        NetworkIXLan.objects.get(id=r_data["id"]).delete()
    def test_z_misc_002_dupe_name_update(self):
        """Renaming to a soft-deleted entity's name fails without undeleting it."""
        # test that changing the name of entity A (status=ok)
        # to name of entity B (status=deleted) does raise the appropriate
        # unique key error and does not undelete entity B
        A = SHARED["fac_rw_dupe_ok"]
        B = SHARED["fac_rw_dupe_deleted"]
        self.assertEqual(A.status, "ok")
        self.assertEqual(B.status, "deleted")
        self.assert_update(
            self.db_org_admin,
            "fac",
            A.id,
            {},
            test_failures={"invalid": {"name": B.name}},
        )
        # B must still be deleted after the failed rename
        B.refresh_from_db()
        self.assertEqual(B.status, "deleted")
def test_z_misc_001_ix_phone_number_validation(self):
data = self.make_data_ix(org_id=SHARED["org_rw_ok"].id)
# test that valid number comes back properly formatted
data.update(
prefix=PREFIXES_V4[-1],
tech_phone="+1 206 555 0199",
policy_phone="+1 206 555 0199",
)
r_data = self.db_org_admin.create("ix", data, return_response=True).get("data")[
0
]
assert r_data["tech_phone"] == "+12065550199"
assert r_data["policy_phone"] == "+12065550199"
# test that invalid numbers raise validation errors
self.assert_update(
self.db_org_admin,
"ix",
r_data["id"],
{},
test_failures={"invalid": {"tech_phone": "invalid number"}},
)
self.assert_update(
self.db_org_admin,
"ix",
r_data["id"],
{},
test_failures={"invalid": {"policy_phone": "invalid number"}},
)
def test_z_misc_001_poc_phone_number_validation(self):
    """Valid POC phone numbers are normalized; invalid ones are rejected."""
    payload = self.make_data_poc(net_id=SHARED["net_rw_ok"].id)
    payload.update(phone="+1 206 555 0199")
    response = self.db_org_admin.create("poc", payload, return_response=True)
    poc = response.get("data")[0]
    # a valid number is returned in E.164 format
    assert poc["phone"] == "+12065550199"
    # an invalid number must fail validation on update
    self.assert_update(
        self.db_org_admin,
        "poc",
        poc["id"],
        {},
        test_failures={"invalid": {"phone": "invalid number"}},
    )
def test_z_misc_001_org_create(self):
    """
    Creating organizations through the API must be denied for every
    client (guest, user, org member, org admin, crud users).
    """
    # no one should be allowed to create an org via the api
    # at this point in time
    for db in self.all_dbs():
        self.assert_create(
            db,
            "org",
            self.make_data_org(name=self.make_name("Test")),
            test_success=False,
            test_failures={"perms": {}},
        )
def test_z_misc_001_suggest_net(self):
    """
    Networks created by a regular user against the suggested-entity org
    are accepted as suggestions (status "pending" under the suggestion
    org); guests are not permitted to suggest at all.
    """
    data = self.make_data_net(
        asn=9000901, org_id=settings.SUGGEST_ENTITY_ORG, suggest=True
    )
    r_data = self.assert_create(self.db_user, "net", data)
    self.assertEqual(r_data["org_id"], settings.SUGGEST_ENTITY_ORG)
    self.assertEqual(r_data["status"], "pending")
    net = Network.objects.get(id=r_data["id"])
    self.assertEqual(net.org_id, settings.SUGGEST_ENTITY_ORG)
    # guests may not suggest networks; the failure assertion's return
    # value was previously bound to an unused variable (removed)
    data = self.make_data_net(
        asn=9000902, org_id=settings.SUGGEST_ENTITY_ORG, suggest=True
    )
    self.assert_create(
        self.db_guest, "net", data, test_success=False, test_failures={"perms": {}}
    )
def test_z_misc_001_suggest_fac(self):
    """
    Facilities created by a regular user against the suggested-entity
    org become suggestions (status "pending"); guests may not suggest.
    """
    data = self.make_data_fac(org_id=settings.SUGGEST_ENTITY_ORG, suggest=True)
    r_data = self.assert_create(self.db_user, "fac", data)
    self.assertEqual(r_data["org_id"], settings.SUGGEST_ENTITY_ORG)
    self.assertEqual(r_data["status"], "pending")
    fac = Facility.objects.get(id=r_data["id"])
    self.assertEqual(fac.org_id, settings.SUGGEST_ENTITY_ORG)
    # guests may not suggest facilities; dropped the unused binding of
    # the failure assertion's return value
    data = self.make_data_fac(org_id=settings.SUGGEST_ENTITY_ORG, suggest=True)
    self.assert_create(
        self.db_guest, "fac", data, test_success=False, test_failures={"perms": {}}
    )
def test_z_misc_001_suggest_ix(self):
    """
    Exchanges created by a regular user against the suggested-entity
    org become suggestions (status "pending"); guests may not suggest.
    """
    data = self.make_data_ix(
        org_id=settings.SUGGEST_ENTITY_ORG, suggest=True, prefix=self.get_prefix4()
    )
    r_data = self.assert_create(
        self.db_user, "ix", data, ignore=["prefix", "suggest"]
    )
    self.assertEqual(r_data["org_id"], settings.SUGGEST_ENTITY_ORG)
    self.assertEqual(r_data["status"], "pending")
    ix = InternetExchange.objects.get(id=r_data["id"])
    self.assertEqual(ix.org_id, settings.SUGGEST_ENTITY_ORG)
    # guests may not suggest exchanges; dropped the unused binding of
    # the failure assertion's return value
    data = self.make_data_ix(
        org_id=settings.SUGGEST_ENTITY_ORG, suggest=True, prefix=self.get_prefix4()
    )
    self.assert_create(
        self.db_guest,
        "ix",
        data,
        ignore=["prefix", "suggest"],
        test_success=False,
        test_failures={"perms": {}},
    )
def test_z_misc_001_suggest_outside_of_post(self):
    """
    The `suggest` keyword is only valid on `POST`; sending it with an
    update must fail validation and must not move the entity to the
    suggestion org.
    """
    # The `suggest` keyword should only be allowed for
    # `POST` events
    for reftag in ["ix", "fac", "net"]:
        ent = SHARED[f"{reftag}_rw_ok"]
        org_id = ent.org_id
        self.assert_update(
            self.db_org_admin,
            reftag,
            ent.id,
            {"notes": "bla"},
            test_failures={"invalid": {"suggest": True}},
        )
        # entity must still belong to its original org
        ent.refresh_from_db()
        self.assertEqual(ent.org_id, org_id)
def test_z_misc_001_fac_address_geocode(self):
    """
    Changing an address field flags the facility for geocode re-sync
    (geocode_status flips to False); changing unrelated fields leaves
    the flag untouched.
    """
    # test that facility gets marked for geocode sync after address field
    # change
    fac = SHARED["fac_rw_ok"]
    fac.geocode_status = True
    fac.save()
    self.assert_update(
        self.db_org_admin, "fac", fac.id, {"address1": "This is a test"}
    )
    fac.refresh_from_db()
    self.assertEqual(fac.geocode_status, False)
    # reset geocode status
    fac.geocode_status = True
    fac.save()
    # test that facility does NOT get marked for geocode sync after non relevant
    # fields are changed
    self.assert_update(
        self.db_org_admin,
        "fac",
        fac.id,
        {"website": "http://example.com", "name": fac.name + " Geocode Test"},
    )
    fac.refresh_from_db()
    self.assertEqual(fac.geocode_status, True)
def test_z_misc_001_api_errors(self):
    """
    Test empty POST, PUT data error response
    Test parse error POST, PUT data error response
    """
    # iterate the mapping directly; no need to materialize keys() in a list
    for reftag in REFTAG_MAP:
        self._test_z_misc_001_api_errors(reftag, "post", "create")
        self._test_z_misc_001_api_errors(reftag, "put", "update")
def _test_z_misc_001_api_errors(self, reftag, method, action):
    """
    Issue a `method` request for `reftag` with a malformed and with an
    empty payload and assert the API's error messages.

    NOTE(review): the view is always NetworkViewSet and the last two
    requests hard-code the "/net/" url regardless of `reftag` -- confirm
    this is intentional (the payload error handling under test may be
    reftag-agnostic).
    """
    factory = APIRequestFactory()
    url = f"/{reftag}/"
    view_action = {method: action}
    view = NetworkViewSet.as_view(view_action)
    fn = getattr(factory, method)
    ERR_PARSE = "Data supplied with the {} request could not be parsed: JSON parse error - Expecting value: line 1 column 1 (char 0)".format(
        method.upper()
    )
    ERR_MISSING = f"No data was supplied with the {method.upper()} request"
    # test posting invalid json error
    request = fn(url, "in{valid json", content_type="application/json")
    response = view(request)
    response.render()
    assert json.loads(response.content)["meta"]["error"] == ERR_PARSE
    # test posting empty json error
    request = fn("/net/", "{}", content_type="application/json")
    response = view(request)
    response.render()
    assert json.loads(response.content)["meta"]["error"] == ERR_MISSING
    # test posting an entirely empty body
    request = fn("/net/", "", content_type="application/json")
    response = view(request)
    response.render()
    assert json.loads(response.content)["meta"]["error"] == ERR_MISSING
class Command(BaseCommand):
help = "This runs the api test harness. All write ops are performed under an organization specifically made for testing, so running to against a prod environment should be fine in theory."
def add_arguments(self, parser):
    """Register command-line options for the API test harness."""
    # --only: run only tests whose name contains the given substring(s)
    parser.add_argument("--only", help="only run this test", dest="only")
    # --setup: create the fixtures (users, orgs, entities) and exit
    parser.add_argument(
        "--setup",
        help="runs api test setup (user, org create) only",
        dest="setup",
        action="store_true",
    )
@classmethod
def log(cls, msg):
    """Print a setup/teardown progress message."""
    print(msg)
@classmethod
def create_entity(
    cls, model, prefix="rw", unset=(), key_suffix=None, name_suffix=None, **kwargs
):
    """
    Get or create a test instance of `model`, register it in SHARED
    under "<tag>_<prefix>_<status>[_<key_suffix>]" and return it.

    `unset` lists keys to strip from the generated payload before
    instantiation (fixed: the default was a shared mutable list, now an
    immutable empty tuple). `kwargs` may carry `status` (default "ok")
    plus model field overrides; for "ixpfx" a unique prefix is picked
    from the module-level pools based on `protocol`.
    """
    tag = model.handleref.tag
    status = kwargs.get("status", "ok")
    name = f"API Test:{tag.upper()}:{prefix.upper()}:{status}"
    if name_suffix:
        name = f"{name}{name_suffix}"
    data = {"status": status}
    if tag in ["ix", "net", "fac", "org"]:
        data["name"] = name
    if tag == "ixpfx":
        # hand out a unique prefix per existing object count
        if kwargs.get("protocol", 4) == 4:
            data["prefix"] = PREFIXES_V4[model.objects.all().count()]
        elif kwargs.get("protocol") == 6:
            data["prefix"] = PREFIXES_V6[model.objects.all().count()]
    data.update(**kwargs)
    try:
        obj = model.objects.get(**data)
        cls.log(
            "%s with status '%s' for %s testing already exists, skipping!"
            % (tag.upper(), status, prefix.upper())
        )
    except model.DoesNotExist:
        # flesh out the payload via TestJSON.make_data_<tag> when available
        fn = getattr(TestJSON, "make_data_%s" % tag, None)
        if fn:
            data = fn(**data)
        for k in unset:
            if k in data:
                del data[k]
        obj = model(**data)
        obj.save()
        cls.log(
            "%s with status '%s' for %s testing created! (%s)"
            % (tag.upper(), status, prefix.upper(), obj.updated)
        )
    # register under the shared fixture key (renamed from `id`, which
    # shadowed the builtin)
    key = f"{tag}_{prefix}_{status}"
    if key_suffix:
        key = f"{key}_{key_suffix}"
    SHARED[key] = obj
    return obj
@classmethod
def create_user(cls, USER):
    """
    Get or create the django user described by the USER spec dict
    (keys "user" and "password"). Existing users get their group
    memberships and grainy permissions reset so reruns start from a
    clean slate. Returns the user instance.
    """
    try:
        user = User.objects.get(username=USER.get("user"))
        cls.log("USER '%s' already exists, skipping!" % USER.get("user"))
        # reset memberships/permissions for a clean rerun
        user.groups.clear()
        user.grainy_permissions.all().delete()
    except User.DoesNotExist:
        user = User.objects.create(username=USER.get("user"))
        user.set_password(USER.get("password"))
        user.save()
        cls.log("USER '%s' created!" % USER.get("user"))
    return user
@classmethod
def prepare(cls, *args, **options):
    """
    Build (idempotently) all users, organizations and entities the API
    tests rely on, registering them in the module-level SHARED dict.

    Roughly in order: test users and crud users; read/write, pending and
    read-only orgs; a deleted network; net/fac/ix entities in several
    prefix/status variants; duplicate-name fixtures; ixlan visibility
    setup; prefixes, ixfac, netfac, netixlan and POC entities; grainy
    permissions for the crud users; finally the suggested-entity org.
    """
    cls.log("Running setup for API testing...")
    memberGroup = Group.objects.get(name="user")
    # create API test user
    user = cls.create_user(USER)
    memberGroup.user_set.add(user)
    # create API test user org member
    user_org_member = cls.create_user(USER_ORG_MEMBER)
    memberGroup.user_set.add(user_org_member)
    # create API test user org admin
    user_org_admin = cls.create_user(USER_ORG_ADMIN)
    memberGroup.user_set.add(user_org_admin)
    # create API test users for crud testing
    crud_users = {}
    for p, specs in list(USER_CRUD.items()):
        crud_user = cls.create_user(specs)
        crud_users[p] = crud_user
        memberGroup.user_set.add(crud_user)
    # see if we need to create extra organizations (to fill up the
    # database)
    extra_orgs = getattr(cls, "create_extra_orgs", 0)
    i = 0
    while i < extra_orgs:
        cls.create_entity(Organization, prefix="r_%d" % i, status="ok")
        i += 1
    # create API test organization (read & write)
    try:
        org_rw = Organization.objects.get(name=ORG_RW)
        cls.log("ORG for WRITE testing already exists, skipping!")
    except Organization.DoesNotExist:
        org_rw = Organization.objects.create(status="ok", name=ORG_RW)
        cls.log("ORG for WRITE testing created!")
    org_rw.admin_usergroup.user_set.add(user_org_admin)
    for crud_user in list(crud_users.values()):
        org_rw.usergroup.user_set.add(crud_user)
    SHARED["org_id"] = org_rw.id
    SHARED["org_rw"] = SHARED["org_rw_ok"] = org_rw
    # create API test organization (read & write) - status pending
    try:
        org_rwp = Organization.objects.get(name=ORG_RW_PENDING)
        cls.log(
            "ORG for WRITE testing (with status pending) already exists, skipping!"
        )
    except Organization.DoesNotExist:
        org_rwp = Organization.objects.create(status="pending", name=ORG_RW_PENDING)
        cls.log("ORG for WRITE testing (with status pending) created!")
    org_rwp.admin_usergroup.user_set.add(user_org_admin)
    SHARED["org_rwp"] = SHARED["org_rw_pending"] = org_rwp
    # create API test organization (read only)
    try:
        org_r = Organization.objects.get(name=ORG_R)
        cls.log("ORG for READONLY testing already exists, skipping!")
    except Organization.DoesNotExist:
        org_r = Organization.objects.create(name=ORG_R, status="ok")
        cls.log("ORG for READONLY testing created!")
    org_r.usergroup.user_set.add(user_org_member)
    SHARED["org_r"] = SHARED["org_r_ok"] = org_r
    cls.create_entity(Organization, prefix="r", status="pending")
    # create API test network (for status "deleted" tests)
    try:
        net_rd = Network.objects.get(name=NET_R_DELETED, org_id=org_r.id)
        cls.log("NET for status 'deleted' testing already exists, skipping!")
    except Network.DoesNotExist:
        net_rd = Network.objects.create(
            **TestJSON.make_data_net(name=NET_R_DELETED, org_id=org_r.id)
        )
        cls.log("NET for status 'deleted' testing created!")
    net_rd.delete()
    SHARED["net_rd"] = net_rd
    # create various entities for rw testing (three numbered prefix
    # variants per org/status combination)
    for model in [Network, Facility, InternetExchange]:
        for status in ["ok", "pending"]:
            for prefix in ["r", "rw"]:
                cls.create_entity(
                    model,
                    status=status,
                    prefix=prefix,
                    org_id=SHARED[f"org_{prefix}_{status}"].id,
                )
                cls.create_entity(
                    model,
                    status=status,
                    prefix="%s2" % prefix,
                    org_id=SHARED[f"org_{prefix}_{status}"].id,
                )
                cls.create_entity(
                    model,
                    status=status,
                    prefix="%s3" % prefix,
                    org_id=SHARED[f"org_{prefix}_{status}"].id,
                )
    # create entities for duplicate validation testing (one deleted, one
    # active, with near-identical names)
    for model in [Network, Facility, InternetExchange]:
        cls.create_entity(
            model,
            status="deleted",
            prefix="rw_dupe",
            name_suffix=" DUPE",
            org_id=SHARED["org_rw_ok"].id,
        )
        cls.create_entity(
            model,
            status="ok",
            prefix="rw_dupe",
            name_suffix=" DUPE !",
            org_id=SHARED["org_rw_ok"].id,
        )
    # per-prefix IX-F member list visibility for the ixlan fixtures
    visibility = {
        "rw": "Public",
        "rw2": "Users",
        "rw3": "Private",
        "r": "Public",
        "r2": "Users",
        "r3": "Private",
    }
    for status in ["ok", "pending"]:
        for prefix in ["r", "r2", "r3", "rw", "rw2", "rw3"]:
            ixlan = SHARED[f"ixlan_{prefix}_{status}"] = SHARED[
                f"ix_{prefix}_{status}"
            ].ixlan
            if prefix in visibility:
                visible = visibility[prefix]
                ixlan.ixf_ixp_member_list_url_visible = visible
                ixlan.ixf_ixp_member_list_url = "http://localhost"
                ixlan.save()
    # prefixes, ixfac, netfac, netixlan and POC fixtures
    for status in ["ok", "pending"]:
        for prefix in ["r", "rw"]:
            cls.create_entity(
                IXLanPrefix,
                status=status,
                prefix=prefix,
                protocol=4,
                ixlan_id=SHARED[f"ixlan_{prefix}_{status}"].id,
            )
            cls.create_entity(
                IXLanPrefix,
                status=status,
                prefix=f"{prefix}_v6",
                protocol=6,
                ixlan_id=SHARED[f"ixlan_{prefix}_{status}"].id,
            )
            cls.create_entity(
                InternetExchangeFacility,
                status=status,
                prefix=prefix,
                facility_id=SHARED[f"fac_{prefix}_{status}"].id,
                ix_id=SHARED[f"ix_{prefix}_{status}"].id,
            )
            cls.create_entity(
                NetworkFacility,
                status=status,
                prefix=prefix,
                unset=["net_id"],
                facility_id=SHARED[f"fac_{prefix}_{status}"].id,
                network_id=SHARED[f"net_{prefix}_{status}"].id,
            )
            cls.create_entity(
                NetworkIXLan,
                status=status,
                prefix=prefix,
                unset=["net_id"],
                ixlan_id=SHARED[f"ixlan_{prefix}_{status}"].id,
                network_id=SHARED[f"net_{prefix}_{status}"].id,
            )
            for v in ["Private", "Users", "Public"]:
                cls.create_entity(
                    NetworkContact,
                    status=status,
                    prefix=prefix,
                    visible=v,
                    network_id=SHARED[f"net_{prefix}_{status}"].id,
                    unset=["net_id"],
                    key_suffix=v.lower(),
                )
    # set up permissions for crud permission tests
    crud_users["delete"].grainy_permissions.create(
        namespace=SHARED["net_rw3_ok"].grainy_namespace,
        permission=PERM_READ | PERM_DELETE,
    )
    crud_users["create"].grainy_permissions.create(
        namespace=SHARED["net_rw3_ok"].grainy_namespace,
        permission=PERM_READ | PERM_CREATE,
    )
    crud_users["update"].grainy_permissions.create(
        namespace=SHARED["net_rw3_ok"].grainy_namespace,
        permission=PERM_READ | PERM_UPDATE,
    )
    # undelete in case they got flagged as deleted (except the dedicated
    # deleted-net fixture and the DUPE fixtures)
    for name, obj in list(SHARED.items()):
        if (
            hasattr(obj, "status")
            and obj.status == "deleted"
            and obj != net_rd
            and getattr(obj, "name", "").find("DUPE") == -1
        ):
            obj.status = "ok"
            obj.save()
    # NOTE(review): "Entitites" is misspelled, but it is a stored org
    # name -- other code may match on it, so it is left unchanged here.
    Organization.objects.create(
        name="Suggested Entitites", status="ok", id=settings.SUGGEST_ENTITY_ORG
    )
    cls.log("Setup for API testing completed!")
@classmethod
def cleanup(cls, *args, **options):
    """
    Hard-delete everything registered in SHARED during `prepare`.

    Handles both model instances (anything with a `delete` attribute)
    and "<reftag>_id" entries holding bare ids, which are resolved
    through REFTAG_MAP. Deletion is best-effort: objects whose hard
    delete asserts are skipped.
    """
    cls.log("Cleaning up...")
    deleted = 0
    for k, obj in list(SHARED.items()):
        if hasattr(obj, "delete"):
            try:
                obj.delete(hard=True)
                deleted += 1
            except AssertionError:
                # hard delete may refuse for some objects; best-effort
                pass
        elif k[-3:] == "_id":
            # resolve "<reftag>_id" entries to their model class; the
            # original rebound `cls` here, shadowing the classmethod
            # parameter -- use a distinct name instead
            reftag = re.match("^(.+)_id$", k).group(1)
            model_cls = REFTAG_MAP.get(reftag)
            if model_cls:
                try:
                    inst = model_cls.objects.get(id=obj)
                    deleted += 1
                    inst.delete()
                except model_cls.DoesNotExist:
                    pass
    print("Deleted", deleted, "objects")
def handle(self, *args, **options):
    """
    Entry point: build fixtures, optionally stop after setup (--setup),
    run the selected (--only) or all TestJSON tests, then tear the
    fixtures down.
    """
    try:
        self.prepare()
    except IntegrityError as inst:
        print(inst)
        self.cleanup()
        # fixed typo in the user-facing message ("inegrity")
        print("Cleaned up after integrity error, please try again ..")
        return
    if options["setup"]:
        # --setup: fixtures only, no tests
        return
    if not options["only"]:
        suite = unittest.TestLoader().loadTestsFromTestCase(TestJSON)
    else:
        # --only a,b,c: run tests whose name contains any given substring
        only = options["only"].split(",")
        funcs = []
        for key in vars(TestJSON):
            for o in only:
                if key.startswith("test_") and o in key:
                    funcs.append(
                        "peeringdb_server.management.commands.pdb_api_test.TestJSON.%s"
                        % key
                    )
        funcs = sorted(funcs)
        suite = unittest.TestLoader().loadTestsFromNames(funcs)
    unittest.TextTestRunner(verbosity=2).run(suite)
    self.cleanup()
| 34.899057 | 192 | 0.487791 |
import pytest
import copy
import unittest
import uuid
import random
import re
import time
import datetime
import json
from twentyc.rpc import (
RestClient,
PermissionDeniedException,
InvalidRequestException,
NotFoundException,
)
from grainy.const import (
PERM_READ,
PERM_UPDATE,
PERM_CREATE,
PERM_DELETE,
)
from django.core.management.base import BaseCommand
from django.contrib.auth.models import Group
from django.conf import settings
from django.db.utils import IntegrityError
from rest_framework import serializers
from rest_framework.test import APIRequestFactory
from peeringdb_server.models import (
REFTAG_MAP,
QUEUE_ENABLED,
User,
Organization,
Network,
InternetExchange,
Facility,
NetworkContact,
NetworkIXLan,
NetworkFacility,
IXLan,
IXLanPrefix,
InternetExchangeFacility,
DeskProTicket,
)
from peeringdb_server.serializers import REFTAG_MAP as REFTAG_MAP_SLZ
from peeringdb_server import inet, settings as pdb_settings
from peeringdb_server.rest import NetworkViewSet
START_TIMESTAMP = time.time()
SHARED = {}
NUMERIC_TESTS = {
"lt": "Less",
"lte": "LessEqual",
"gt": "Greater",
"gte": "GreaterEqual",
"": "Equal",
}
DATETIME = datetime.datetime.now()
DATE = DATETIME.date()
DATE_YDAY = DATE - datetime.timedelta(days=1)
DATE_TMRW = DATE - datetime.timedelta(days=-1)
DATES = {
"today": (DATE, DATE.strftime("%Y-%m-%d")),
"yesterday": (DATE_YDAY, DATE_YDAY.strftime("%Y-%m-%d")),
"tomorrow": (DATE_TMRW, DATE_TMRW.strftime("%Y-%m-%d")),
}
ORG_RW = "API Test Organization RW"
ORG_RW_PENDING = "%s:Pending" % ORG_RW
ORG_R = "API Test Organization R"
NET_R = "%s:Network" % ORG_R
NET_R_PENDING = "%s:Pending" % NET_R
NET_R_DELETED = "%s:Deleted" % NET_R
IX_R = "%s:Exchange" % ORG_R
FAC_R = "%s:Facility" % ORG_R
USER = {"user": "api_test", "password": "89c8ec05-b897"}
USER_ORG_ADMIN = {"user": "api_test_org_admin", "password": "89c8ec05-b897"}
USER_ORG_MEMBER = {"user": "api_test_org_member", "password": "89c8ec05-b897"}
USER_CRUD = {
"delete": {"user": "api_test_crud_delete", "password": "89c8ec05-b897"},
"update": {"user": "api_test_crud_update", "password": "89c8ec05-b897"},
"create": {"user": "api_test_crud_create", "password": "89c8ec05-b897"},
}
URL = settings.API_URL
CITY = "Chicago"
COUNTRY = "US"
CONTINENT = "North America"
PHONE = "+12065550199"
WEBSITE = "http://www.test.apitest"
STATE = "IL"
ZIPCODE = "1-2345"
NOTE = "This is a test entry made by a script to test out the API"
EMAIL = "test@20c.com"
VERBOSE = False
PREFIXES_V4 = [
"206.223.114.0/24",
"206.223.115.0/24",
"206.223.116.0/24",
"206.223.117.0/24",
"206.223.118.0/24",
"206.223.119.0/24",
"206.223.120.0/24",
"206.223.121.0/24",
"206.223.122.0/24",
]
PREFIXES_V6 = [
"2001:504:0:1::/64",
"2001:504:0:2::/64",
"2001:504:0:3::/64",
"2001:504:0:4::/64",
"2001:504:0:5::/64",
"2001:504:0:6::/64",
"2001:504:0:7::/64",
"2001:504:0:8::/64",
"2001:504:0:9::/64",
]
class TestJSON(unittest.TestCase):
rest_client = RestClient
PREFIX_COUNT = 110
IP4_COUNT = 1
IP6_COUNT = 1
@classmethod
def get_ip6(cls, ixlan):
hosts = []
for host in (
ixlan.ixpfx_set.filter(status=ixlan.status, protocol=6)
.first()
.prefix.hosts()
):
if len(hosts) < 100:
hosts.append(host)
else:
break
r = "{}".format(hosts[cls.IP6_COUNT])
cls.IP6_COUNT += 1
return r
@classmethod
def get_ip4(cls, ixlan):
hosts = []
for host in (
ixlan.ixpfx_set.filter(status=ixlan.status, protocol=4)
.first()
.prefix.hosts()
):
if len(hosts) < 100:
hosts.append(host)
else:
break
r = "{}".format(hosts[cls.IP4_COUNT])
cls.IP4_COUNT += 1
return r
@classmethod
def get_prefix4(cls):
r = f"206.41.{cls.PREFIX_COUNT}.0/24"
cls.PREFIX_COUNT += 1
return r
@classmethod
def get_prefix6(cls):
r = f"2001:504:41:{cls.PREFIX_COUNT}::/64"
cls.PREFIX_COUNT += 1
return r
def setUp(self):
self.db_guest = self.rest_client(URL, verbose=VERBOSE)
self.db_user = self.rest_client(URL, verbose=VERBOSE, **USER)
self.db_org_member = self.rest_client(URL, verbose=VERBOSE, **USER_ORG_MEMBER)
self.db_org_admin = self.rest_client(URL, verbose=VERBOSE, **USER_ORG_ADMIN)
self.user_org_admin = User.objects.get(username="api_test_org_admin")
self.user_org_member = User.objects.get(username="api_test_org_member")
for p, specs in list(USER_CRUD.items()):
setattr(
self, "db_crud_%s" % p, self.rest_client(URL, verbose=VERBOSE, **specs)
)
def all_dbs(self, exclude=[]):
return [
db
for db in [
self.db_guest,
self.db_org_member,
self.db_user,
self.db_org_admin,
self.db_crud_create,
self.db_crud_delete,
self.db_crud_update,
]
if db not in exclude
]
def readonly_dbs(self, exclude=[]):
return [
db
for db in [self.db_guest, self.db_org_member, self.db_user]
if db not in exclude
]
| true | true |
f73e40ebebcc8c71359c42c7a788d61b7c7a525a | 3,869 | py | Python | navigation_experiments_mc_pddl/launch/pddl_reconfig_controller_launch.py | estherag/navigation_experiments_mc_bts_pddl | 992b675c3519a726bf6b9c342402fbee2296941e | [
"Apache-2.0"
] | 3 | 2021-01-25T17:07:37.000Z | 2021-02-04T12:58:04.000Z | navigation_experiments_mc_pddl/launch/pddl_reconfig_controller_launch.py | estherag/navigation_experiments_mc_bts_pddl | 992b675c3519a726bf6b9c342402fbee2296941e | [
"Apache-2.0"
] | null | null | null | navigation_experiments_mc_pddl/launch/pddl_reconfig_controller_launch.py | estherag/navigation_experiments_mc_bts_pddl | 992b675c3519a726bf6b9c342402fbee2296941e | [
"Apache-2.0"
] | 2 | 2022-02-10T10:55:20.000Z | 2022-02-14T01:35:24.000Z | # Copyright 2019 Intelligent Robotics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription, SetEnvironmentVariable
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch_ros.actions import Node
import launch
import launch.actions
import launch.events
import launch_ros.actions
import launch_ros.events
import launch_ros.events.lifecycle
def generate_launch_description():
    """
    Assemble the launch description for the PDDL patrol experiment:
    line-buffered logging, the PlanSys2 bringup (with the
    patrol_w_recharge_reconfig domain) and the PDDL action nodes.
    """
    # Get the launch directory
    example_dir = get_package_share_directory('navigation_experiments_mc_pddl')

    stdout_linebuf_envvar = SetEnvironmentVariable(
        'RCUTILS_CONSOLE_STDOUT_LINE_BUFFERED', '1')

    plansys2_cmd = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(os.path.join(
            get_package_share_directory('plansys2_bringup'),
            'launch',
            'plansys2_bringup_launch_distributed.py')),
        launch_arguments={
            'model_file': example_dir + '/pddl/patrol_w_recharge_reconfig.pddl'
        }.items()
    )

    # All action nodes share package/output/parameters; only the
    # executable (and the matching node name) differs, so build them in
    # a loop instead of seven copy-pasted Node definitions.
    action_executables = [
        'move_action_node',
        'patrol_action_node',
        'charge_action_node',
        'ask_charge_action_node',
        'degraded_move_action_node',
        'reconfigure_action_node',
        'recover_nav_sensor_node',
    ]
    action_nodes = [
        Node(
            package='navigation_experiments_mc_pddl',
            executable=executable,
            name=executable,
            output='screen',
            parameters=[])
        for executable in action_executables
    ]

    # Create the launch description and populate
    ld = LaunchDescription()

    # Set environment variables
    ld.add_action(stdout_linebuf_envvar)

    # Declare the launch options
    ld.add_action(plansys2_cmd)
    for node in action_nodes:
        ld.add_action(node)

    return ld
| 31.713115 | 102 | 0.71207 |
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription, SetEnvironmentVariable
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch_ros.actions import Node
import launch
import launch.actions
import launch.events
import launch_ros.actions
import launch_ros.events
import launch_ros.events.lifecycle
def generate_launch_description():
example_dir = get_package_share_directory('navigation_experiments_mc_pddl')
stdout_linebuf_envvar = SetEnvironmentVariable(
'RCUTILS_CONSOLE_STDOUT_LINE_BUFFERED', '1')
plansys2_cmd = IncludeLaunchDescription(
PythonLaunchDescriptionSource(os.path.join(
get_package_share_directory('plansys2_bringup'),
'launch',
'plansys2_bringup_launch_distributed.py')),
launch_arguments={'model_file': example_dir + '/pddl/patrol_w_recharge_reconfig.pddl'}.items()
)
move_cmd = Node(
package='navigation_experiments_mc_pddl',
executable='move_action_node',
name='move_action_node',
output='screen',
parameters=[])
patrol_cmd = Node(
package='navigation_experiments_mc_pddl',
executable='patrol_action_node',
name='patrol_action_node',
output='screen',
parameters=[])
charge_cmd = Node(
package='navigation_experiments_mc_pddl',
executable='charge_action_node',
name='charge_action_node',
output='screen',
parameters=[])
ask_charge_cmd = Node(
package='navigation_experiments_mc_pddl',
executable='ask_charge_action_node',
name='ask_charge_action_node',
output='screen',
parameters=[])
degraded_move_cmd = Node(
package='navigation_experiments_mc_pddl',
executable='degraded_move_action_node',
name='degraded_move_action_node',
output='screen',
parameters=[])
reconfigure_cmd = Node(
package='navigation_experiments_mc_pddl',
executable='reconfigure_action_node',
name='reconfigure_action_node',
output='screen',
parameters=[])
recover_nav_sensor_cmd = Node(
package='navigation_experiments_mc_pddl',
executable='recover_nav_sensor_node',
name='recover_nav_sensor_node',
output='screen',
parameters=[])
ld = LaunchDescription()
ld.add_action(stdout_linebuf_envvar)
ld.add_action(plansys2_cmd)
ld.add_action(move_cmd)
ld.add_action(patrol_cmd)
ld.add_action(charge_cmd)
ld.add_action(ask_charge_cmd)
ld.add_action(degraded_move_cmd)
ld.add_action(reconfigure_cmd)
ld.add_action(recover_nav_sensor_cmd)
return ld
| true | true |
f73e4133250d81b18962dd6f4d20939f7ba1f301 | 451 | py | Python | django_web/django_channels_example/routing.py | CarlosMart626/Django-channels-example | d8d71a10c3f31680e3c3abba45f39c64cf29d7af | [
"MIT"
] | null | null | null | django_web/django_channels_example/routing.py | CarlosMart626/Django-channels-example | d8d71a10c3f31680e3c3abba45f39c64cf29d7af | [
"MIT"
] | null | null | null | django_web/django_channels_example/routing.py | CarlosMart626/Django-channels-example | d8d71a10c3f31680e3c3abba45f39c64cf29d7af | [
"MIT"
] | null | null | null | from channels.routing import route
from channels import include
from products.consumers import ws_connect, ws_message, ws_disconnect
channel_routing = [
route("websocket.connect", ws_connect,
path=r"^/(?P<room_name>[a-zA-Z0-9_]+)/$"),
route("websocket.receive", ws_message,
path=r"^/(?P<room_name>[a-zA-Z0-9_]+)/$"),
route("websocket.disconnect", ws_disconnect,
path=r"^/(?P<room_name>[a-zA-Z0-9_]+)/$"),
]
| 32.214286 | 68 | 0.654102 | from channels.routing import route
from channels import include
from products.consumers import ws_connect, ws_message, ws_disconnect
channel_routing = [
route("websocket.connect", ws_connect,
path=r"^/(?P<room_name>[a-zA-Z0-9_]+)/$"),
route("websocket.receive", ws_message,
path=r"^/(?P<room_name>[a-zA-Z0-9_]+)/$"),
route("websocket.disconnect", ws_disconnect,
path=r"^/(?P<room_name>[a-zA-Z0-9_]+)/$"),
]
| true | true |
f73e4295e60a021282c4ef7ca4f7ee9b03087754 | 845 | py | Python | btre-project/pages/views.py | amirzp/btre-project | 270fa639d71df5d3d11c356715e6b134da23b9cd | [
"MIT"
] | 1 | 2021-03-02T11:43:30.000Z | 2021-03-02T11:43:30.000Z | btre-project/pages/views.py | amirzp/btre-project | 270fa639d71df5d3d11c356715e6b134da23b9cd | [
"MIT"
] | null | null | null | btre-project/pages/views.py | amirzp/btre-project | 270fa639d71df5d3d11c356715e6b134da23b9cd | [
"MIT"
] | null | null | null | from django.shortcuts import render
from listings.models import Listings
from listings import choices
from realtors.models import Realtor
def index(request):
    """
    Render the home page with the three newest published listings and
    the choice lists used by the search form.
    """
    # NOTE(review): the ordering field "-is_data" looks like a typo for
    # a date field (e.g. "-list_date") -- verify against the Listings
    # model before changing it, since it is a model field reference.
    listings = Listings.objects.order_by(
        '-is_data'
    ).filter(is_published=True)[:3]  # fixed misspelled local `lisitngs`

    context = {
        'listings': listings,
        'state_choices': choices.state_choices,
        'bedroom_choices': choices.bedroom_choices,
        'price_choices': choices.price_choices
    }

    return render(request, 'pages/index.html', context)
def about(request):
    """Render the about page with every realtor plus the current MVP."""
    all_realtors = Realtor.objects.order_by('-hire_data').all()
    # single-element queryset holding the most recently hired MVP
    top_seller = Realtor.objects.order_by('-hire_data').filter(is_mvp=True)[:1]
    return render(
        request,
        'pages/about.html',
        {
            'realtors': all_realtors,
            'mvp_realtors': top_seller,
        },
    )
from listings.models import Listings
from listings import choices
from realtors.models import Realtor
def index(request):
lisitngs = Listings.objects.order_by(
'-is_data'
).filter(is_published=True)[:3]
context = {
'listings': lisitngs,
'state_choices': choices.state_choices,
'bedroom_choices': choices.bedroom_choices,
'price_choices': choices.price_choices
}
return render(request, 'pages/index.html', context)
def about(request):
realtors = Realtor.objects.order_by('-hire_data').all()
mvp_realtors = Realtor.objects.order_by(
'-hire_data'
).filter(is_mvp=True)[:1]
context = {
'realtors': realtors,
'mvp_realtors': mvp_realtors
}
return render(request, 'pages/about.html', context)
| true | true |
f73e4490011729f262f99b48339682a74fdde1ec | 1,499 | py | Python | Deployment/DispatchInterfaces/DummyInterface.py | Caius-Lu/Savior | 47c22e06c38cc9b5f7007d79f791015c8b2b76aa | [
"BSD-2-Clause"
] | 108 | 2021-03-19T03:45:48.000Z | 2022-03-29T12:19:38.000Z | Deployment/DispatchInterfaces/DummyInterface.py | Caius-Lu/Savior | 47c22e06c38cc9b5f7007d79f791015c8b2b76aa | [
"BSD-2-Clause"
] | 2 | 2021-05-12T07:26:21.000Z | 2021-07-16T12:53:52.000Z | Deployment/DispatchInterfaces/DummyInterface.py | Caius-Lu/Savior | 47c22e06c38cc9b5f7007d79f791015c8b2b76aa | [
"BSD-2-Clause"
] | 27 | 2021-03-19T05:50:26.000Z | 2021-12-28T07:13:09.000Z | from fastapi import APIRouter, Form
from fastapi.responses import ORJSONResponse
from Deployment.ConsumerServices.DummyService import DummyService4Task, DummyService1Task, DummyService2Task, \
DummyService3Task
from Utils.DAG import DAG
from Utils.ServiceUtils import wait_and_compose_all_task_result
router = APIRouter()
@router.post('/dummy_interface_1')
async def dummy_interface(
        dummy_input_1: str = Form(...),
        dummy_input_2: int = Form(...),
        dummy_input_3: float = Form(...),
):
    """
    Demo endpoint wiring four dummy consumer-service tasks into a DAG.

    task1/task2 run directly from the form inputs, task3 consumes
    task2's result, and task4 fans in one raw value plus results from
    task1, task2 and task3. Responds with the composed results and a
    dump of the DAG for inspection.
    """
    dag = DAG()
    # independent leaf tasks fed directly from the form inputs
    task1 = DummyService1Task(_dag=dag)
    task1.add_dependency_from_value('dummy_input_1', dummy_input_1)
    task2 = DummyService2Task(_dag=dag)
    task2.add_dependency_from_value('dummy_input_1', dummy_input_2)
    # task3 depends on task2's first result
    task3 = DummyService3Task(_dag=dag)
    task3.add_dependency_from_task('dummy_input_1', task2, 'result_1')
    # task4 fans in one raw value plus results from task1/task2/task3
    task4 = DummyService4Task(_dag=dag)
    task4.add_dependency_from_value('dummy_input_1', dummy_input_3)
    task4.add_dependency_from_task('dummy_input_2', task3, 'result_1')
    task4.add_dependency_from_task('dummy_input_3', task3, 'result_2')
    task4.add_dependency_from_task('dummy_input_4', task2, 'result_1')
    task4.add_dependency_from_task('dummy_input_5', task1, 'result_1')
    # await completion of all four tasks and compose their results
    final_result = await wait_and_compose_all_task_result(task1, task2, task3, task4)
    to_return_result = dict()
    to_return_result['result'] = final_result
    to_return_result['dag'] = dag.dump()
    return ORJSONResponse(to_return_result)
| 41.638889 | 111 | 0.762508 | from fastapi import APIRouter, Form
from fastapi.responses import ORJSONResponse
from Deployment.ConsumerServices.DummyService import DummyService4Task, DummyService1Task, DummyService2Task, \
DummyService3Task
from Utils.DAG import DAG
from Utils.ServiceUtils import wait_and_compose_all_task_result
router = APIRouter()
@router.post('/dummy_interface_1')
async def dummy_interface(
dummy_input_1: str = Form(...),
dummy_input_2: int = Form(...),
dummy_input_3: float = Form(...),
):
dag = DAG()
task1 = DummyService1Task(_dag=dag)
task1.add_dependency_from_value('dummy_input_1', dummy_input_1)
task2 = DummyService2Task(_dag=dag)
task2.add_dependency_from_value('dummy_input_1', dummy_input_2)
task3 = DummyService3Task(_dag=dag)
task3.add_dependency_from_task('dummy_input_1', task2, 'result_1')
task4 = DummyService4Task(_dag=dag)
task4.add_dependency_from_value('dummy_input_1', dummy_input_3)
task4.add_dependency_from_task('dummy_input_2', task3, 'result_1')
task4.add_dependency_from_task('dummy_input_3', task3, 'result_2')
task4.add_dependency_from_task('dummy_input_4', task2, 'result_1')
task4.add_dependency_from_task('dummy_input_5', task1, 'result_1')
final_result = await wait_and_compose_all_task_result(task1, task2, task3, task4)
to_return_result = dict()
to_return_result['result'] = final_result
to_return_result['dag'] = dag.dump()
return ORJSONResponse(to_return_result)
| true | true |
f73e44e6b5f190162213c848e5a592d8191ece07 | 5,209 | py | Python | comctl32.py | jacoblusk/python-dll-injector | 40ecf5162e2d2f24d6d4b71959a790aca59edd22 | [
"BSD-3-Clause"
] | 1 | 2021-12-04T09:23:08.000Z | 2021-12-04T09:23:08.000Z | comctl32.py | jacoblusk/Python-DLL-Injector | 40ecf5162e2d2f24d6d4b71959a790aca59edd22 | [
"BSD-3-Clause"
] | 1 | 2021-12-04T07:52:15.000Z | 2021-12-04T07:52:15.000Z | comctl32.py | jacoblusk/python-dll-injector | 40ecf5162e2d2f24d6d4b71959a790aca59edd22 | [
"BSD-3-Clause"
] | null | null | null | from ctypes.wintypes import *
from wintypes_extended import *
from winapi_error import *
from user32 import *
import ctypes
class ListViewMessage(enum.IntEnum):
FIRST = 0x1000
GETITEMA = FIRST + 5
INSERTITEMA = FIRST + 7
GETNEXTITEM = FIRST + 12
GETITEMTEXTA = FIRST + 45
INSERTITEMW = FIRST + 77
INSERTCOLUMNA = FIRST + 27
INSERTCOLUMNW = FIRST + 97
SETIMAGELIST = FIRST + 3
SETITEMTEXTA = FIRST + 46
SETITEMTEXTW = FIRST + 116
SORTITEMS = FIRST + 48
def ListView_InsertItemA(hwnd, pitem):
return SendMessageA(hwnd, ListViewMessage.INSERTITEMA, WPARAM(0),
LPARAM(ctypes.cast(pitem, ctypes.c_void_p).value))
def ListView_InsertColumnA(hwnd, iCol, pcol):
return SendMessageA(hwnd, ListViewMessage.INSERTCOLUMNA, WPARAM(iCol),
LPARAM(ctypes.cast(pcol, ctypes.c_void_p).value))
def ListView_SetImageList(hwnd, himl, iImageList):
return ctypes.cast(
SendMessageA(hwnd, ListViewMessage.SETIMAGELIST, WPARAM(iImageList),
LPARAM(ctypes.cast(himl, ctypes.c_void_p).value)),
ctypes.c_void_p)
def ListView_GetNextItem(hwnd, i, flags):
return SendMessageA(hwnd, ListViewMessage.GETNEXTITEM, WPARAM(i), MAKELPARAM((flags), 0))
def ListView_GetItemA(hwnd, pitem):
return SendMessageA(hwnd, ListViewMessage.GETITEMA, WPARAM(0),
LPARAM(ctypes.cast(pitem, ctypes.c_void_p).value))
def ListView_GetItemTextA(hwndLV, i, iSubItem_, pszText_, cchTextMax_):
_macro_lvi = LVITEMA()
_macro_lvi.iSubItem = iSubItem_
_macro_lvi.cchTextMax = cchTextMax_
_macro_lvi.pszText = pszText_
SendMessageA((hwndLV), ListViewMessage.GETITEMTEXTA, WPARAM(i),
LPARAM(ctypes.cast(ctypes.byref(_macro_lvi), ctypes.c_void_p).value))
def ListView_SetItemTextA(hwnd, i, iSubItem, pszText):
lvitem = LVITEMA()
lvitem.iSubItem = iSubItem
lvitem.pszText = pszText
SendMessageA(hwnd, ListViewMessage.SETITEMTEXTA, WPARAM(i),
LPARAM(ctypes.cast(ctypes.byref(lvitem), ctypes.c_void_p).value))
def ListView_SortItems(hwndLV, _pfnCompare, _lPrm):
return SendMessageA(hwndLV, ListViewMessage.SORTITEMS,
WPARAM(_lPrm),
LPARAM(ctypes.cast(_pfnCompare, ctypes.c_void_p).value))
def ImageList_AddIcon(himl, hicon):
ImageList_ReplaceIcon(himl, -1, hicon)
class LVITEMA(ctypes.Structure):
_fields_ = [
('mask', UINT),
('iItem', ctypes.c_int),
('iSubItem', ctypes.c_int),
('state', UINT),
('stateMask', UINT),
('pszText', LPCSTR),
('cchTextMax', ctypes.c_int),
('iImage', ctypes.c_int),
('lParam', LPARAM),
('iIndent', ctypes.c_int),
('iGroupId', ctypes.c_int),
('cColumns', UINT),
('puColumns', ctypes.POINTER(UINT)),
('piColFmt', ctypes.POINTER(ctypes.c_int)),
('iGroup', ctypes.c_int)
]
LPLVITEMA = ctypes.POINTER(LVITEMA)
class NMHDR(ctypes.Structure):
_fields_ = [
('hwndFrom', HWND),
('idFrom', ctypes.POINTER(UINT)),
('code', UINT)
]
class LV_DISPINFOA(ctypes.Structure):
_fields_ = [
('hdr', NMHDR),
('item', LVITEMA)
]
LPLV_DISPINFOA = ctypes.POINTER(LV_DISPINFOA)
class NM_LISTVIEW(ctypes.Structure):
_fields_ = [
('hdr', NMHDR),
('iItem', ctypes.c_int),
('iSubItem', ctypes.c_int),
('uNewState', UINT),
('uOldState', UINT),
('uChanged', UINT),
('ptAction', POINT),
('lParam', LPARAM),
]
LPNM_LISTVIEW = ctypes.POINTER(NM_LISTVIEW)
class LVCOLUMNA(ctypes.Structure):
_fields_ = [
('mask', UINT),
('fmt', ctypes.c_int),
('cx', ctypes.c_int),
('pszText', LPCSTR),
('cchTextMax', ctypes.c_int),
('iSubItem', ctypes.c_int),
('iImage', ctypes.c_int),
('iOrder', ctypes.c_int),
('cxMin', ctypes.c_int),
('cxDefault', ctypes.c_int),
('cxIdeal', ctypes.c_int),
]
LPLVCOLUMNA = ctypes.POINTER(LVCOLUMNA)
class INITCOMMONCONTROLSEX(ctypes.Structure):
_fields_ = [
('dwSize', DWORD),
('dwICC', DWORD)
]
LPINITCOMMONCONTROLSEX = ctypes.POINTER(INITCOMMONCONTROLSEX)
class InitCommonControlsExError(Exception):
pass
def InitCommonControlsEx_errcheck(result, func, args):
if result == 0:
raise InitCommonControlsExError("InitCommonControlsEx failed.")
ImageList_Create = ctypes.windll.comctl32.ImageList_Create
ImageList_Create.argtypes = [ctypes.c_int,
ctypes.c_int, UINT, ctypes.c_int, ctypes.c_int]
ImageList_Create.restype = ctypes.c_void_p
ImageList_Create.errcheck = LPVOID_errcheck
ImageList_ReplaceIcon = ctypes.windll.comctl32.ImageList_ReplaceIcon
ImageList_ReplaceIcon.argtypes = [ctypes.c_void_p, ctypes.c_int, HICON]
ImageList_ReplaceIcon.restype = ctypes.c_int
InitCommonControlsEx = ctypes.windll.comctl32.InitCommonControlsEx
InitCommonControlsEx.argtypes = [LPINITCOMMONCONTROLSEX]
InitCommonControlsEx.restype = BOOL
InitCommonControlsEx.errcheck = InitCommonControlsEx_errcheck
| 28.005376 | 93 | 0.658476 | from ctypes.wintypes import *
from wintypes_extended import *
from winapi_error import *
from user32 import *
import ctypes
class ListViewMessage(enum.IntEnum):
FIRST = 0x1000
GETITEMA = FIRST + 5
INSERTITEMA = FIRST + 7
GETNEXTITEM = FIRST + 12
GETITEMTEXTA = FIRST + 45
INSERTITEMW = FIRST + 77
INSERTCOLUMNA = FIRST + 27
INSERTCOLUMNW = FIRST + 97
SETIMAGELIST = FIRST + 3
SETITEMTEXTA = FIRST + 46
SETITEMTEXTW = FIRST + 116
SORTITEMS = FIRST + 48
def ListView_InsertItemA(hwnd, pitem):
return SendMessageA(hwnd, ListViewMessage.INSERTITEMA, WPARAM(0),
LPARAM(ctypes.cast(pitem, ctypes.c_void_p).value))
def ListView_InsertColumnA(hwnd, iCol, pcol):
return SendMessageA(hwnd, ListViewMessage.INSERTCOLUMNA, WPARAM(iCol),
LPARAM(ctypes.cast(pcol, ctypes.c_void_p).value))
def ListView_SetImageList(hwnd, himl, iImageList):
return ctypes.cast(
SendMessageA(hwnd, ListViewMessage.SETIMAGELIST, WPARAM(iImageList),
LPARAM(ctypes.cast(himl, ctypes.c_void_p).value)),
ctypes.c_void_p)
def ListView_GetNextItem(hwnd, i, flags):
return SendMessageA(hwnd, ListViewMessage.GETNEXTITEM, WPARAM(i), MAKELPARAM((flags), 0))
def ListView_GetItemA(hwnd, pitem):
return SendMessageA(hwnd, ListViewMessage.GETITEMA, WPARAM(0),
LPARAM(ctypes.cast(pitem, ctypes.c_void_p).value))
def ListView_GetItemTextA(hwndLV, i, iSubItem_, pszText_, cchTextMax_):
_macro_lvi = LVITEMA()
_macro_lvi.iSubItem = iSubItem_
_macro_lvi.cchTextMax = cchTextMax_
_macro_lvi.pszText = pszText_
SendMessageA((hwndLV), ListViewMessage.GETITEMTEXTA, WPARAM(i),
LPARAM(ctypes.cast(ctypes.byref(_macro_lvi), ctypes.c_void_p).value))
def ListView_SetItemTextA(hwnd, i, iSubItem, pszText):
lvitem = LVITEMA()
lvitem.iSubItem = iSubItem
lvitem.pszText = pszText
SendMessageA(hwnd, ListViewMessage.SETITEMTEXTA, WPARAM(i),
LPARAM(ctypes.cast(ctypes.byref(lvitem), ctypes.c_void_p).value))
def ListView_SortItems(hwndLV, _pfnCompare, _lPrm):
return SendMessageA(hwndLV, ListViewMessage.SORTITEMS,
WPARAM(_lPrm),
LPARAM(ctypes.cast(_pfnCompare, ctypes.c_void_p).value))
def ImageList_AddIcon(himl, hicon):
ImageList_ReplaceIcon(himl, -1, hicon)
class LVITEMA(ctypes.Structure):
_fields_ = [
('mask', UINT),
('iItem', ctypes.c_int),
('iSubItem', ctypes.c_int),
('state', UINT),
('stateMask', UINT),
('pszText', LPCSTR),
('cchTextMax', ctypes.c_int),
('iImage', ctypes.c_int),
('lParam', LPARAM),
('iIndent', ctypes.c_int),
('iGroupId', ctypes.c_int),
('cColumns', UINT),
('puColumns', ctypes.POINTER(UINT)),
('piColFmt', ctypes.POINTER(ctypes.c_int)),
('iGroup', ctypes.c_int)
]
LPLVITEMA = ctypes.POINTER(LVITEMA)
class NMHDR(ctypes.Structure):
_fields_ = [
('hwndFrom', HWND),
('idFrom', ctypes.POINTER(UINT)),
('code', UINT)
]
class LV_DISPINFOA(ctypes.Structure):
_fields_ = [
('hdr', NMHDR),
('item', LVITEMA)
]
LPLV_DISPINFOA = ctypes.POINTER(LV_DISPINFOA)
class NM_LISTVIEW(ctypes.Structure):
_fields_ = [
('hdr', NMHDR),
('iItem', ctypes.c_int),
('iSubItem', ctypes.c_int),
('uNewState', UINT),
('uOldState', UINT),
('uChanged', UINT),
('ptAction', POINT),
('lParam', LPARAM),
]
LPNM_LISTVIEW = ctypes.POINTER(NM_LISTVIEW)
class LVCOLUMNA(ctypes.Structure):
_fields_ = [
('mask', UINT),
('fmt', ctypes.c_int),
('cx', ctypes.c_int),
('pszText', LPCSTR),
('cchTextMax', ctypes.c_int),
('iSubItem', ctypes.c_int),
('iImage', ctypes.c_int),
('iOrder', ctypes.c_int),
('cxMin', ctypes.c_int),
('cxDefault', ctypes.c_int),
('cxIdeal', ctypes.c_int),
]
LPLVCOLUMNA = ctypes.POINTER(LVCOLUMNA)
class INITCOMMONCONTROLSEX(ctypes.Structure):
_fields_ = [
('dwSize', DWORD),
('dwICC', DWORD)
]
LPINITCOMMONCONTROLSEX = ctypes.POINTER(INITCOMMONCONTROLSEX)
class InitCommonControlsExError(Exception):
pass
def InitCommonControlsEx_errcheck(result, func, args):
if result == 0:
raise InitCommonControlsExError("InitCommonControlsEx failed.")
ImageList_Create = ctypes.windll.comctl32.ImageList_Create
ImageList_Create.argtypes = [ctypes.c_int,
ctypes.c_int, UINT, ctypes.c_int, ctypes.c_int]
ImageList_Create.restype = ctypes.c_void_p
ImageList_Create.errcheck = LPVOID_errcheck
ImageList_ReplaceIcon = ctypes.windll.comctl32.ImageList_ReplaceIcon
ImageList_ReplaceIcon.argtypes = [ctypes.c_void_p, ctypes.c_int, HICON]
ImageList_ReplaceIcon.restype = ctypes.c_int
InitCommonControlsEx = ctypes.windll.comctl32.InitCommonControlsEx
InitCommonControlsEx.argtypes = [LPINITCOMMONCONTROLSEX]
InitCommonControlsEx.restype = BOOL
InitCommonControlsEx.errcheck = InitCommonControlsEx_errcheck
| true | true |
f73e45d5b9b7afdf5933747546d129988abd5898 | 1,893 | py | Python | guild/plugins/skopt_forest_main.py | timt51/guildai | 8d3aa9b902e29eb60ebbb408b3a1cbd3f40fcaec | [
"Apache-2.0"
] | 694 | 2018-11-30T01:06:30.000Z | 2022-03-31T14:46:26.000Z | guild/plugins/skopt_forest_main.py | timt51/guildai | 8d3aa9b902e29eb60ebbb408b3a1cbd3f40fcaec | [
"Apache-2.0"
] | 323 | 2018-11-05T17:44:34.000Z | 2022-03-31T16:56:41.000Z | guild/plugins/skopt_forest_main.py | timt51/guildai | 8d3aa9b902e29eb60ebbb408b3a1cbd3f40fcaec | [
"Apache-2.0"
] | 68 | 2019-04-01T04:24:47.000Z | 2022-02-24T17:22:04.000Z | # Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import logging
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=Warning)
import numpy.core.umath_tests # pylint: disable=unused-import
import skopt
from guild import batch_util
from . import skopt_util
log = logging.getLogger("guild")
def main():
batch_util.init_logging()
batch_run = batch_util.batch_run()
skopt_util.handle_seq_trials(batch_run, _suggest_x)
def _suggest_x(dims, x0, y0, random_start, random_state, opts):
res = skopt.forest_minimize(
lambda *args: 0,
dims,
n_calls=1,
n_random_starts=1 if random_start else 0,
x0=x0,
y0=y0,
random_state=random_state,
kappa=opts["kappa"],
xi=opts["xi"],
)
return res.x_iters[-1], res.random_state
def gen_trials(
flags, prev_results_cb, opt_random_starts=3, opt_kappa=1.96, opt_xi=0.01, **kw
):
"""ipy interface for trials."""
return skopt_util.ipy_gen_trials(
flags,
prev_results_cb,
_suggest_x,
random_starts=opt_random_starts,
suggest_x_opts={
"kappa": opt_kappa,
"xi": opt_xi,
},
**kw
)
if __name__ == "__main__":
main()
| 25.931507 | 82 | 0.686212 |
from __future__ import absolute_import
from __future__ import division
import logging
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=Warning)
import numpy.core.umath_tests
import skopt
from guild import batch_util
from . import skopt_util
log = logging.getLogger("guild")
def main():
batch_util.init_logging()
batch_run = batch_util.batch_run()
skopt_util.handle_seq_trials(batch_run, _suggest_x)
def _suggest_x(dims, x0, y0, random_start, random_state, opts):
res = skopt.forest_minimize(
lambda *args: 0,
dims,
n_calls=1,
n_random_starts=1 if random_start else 0,
x0=x0,
y0=y0,
random_state=random_state,
kappa=opts["kappa"],
xi=opts["xi"],
)
return res.x_iters[-1], res.random_state
def gen_trials(
flags, prev_results_cb, opt_random_starts=3, opt_kappa=1.96, opt_xi=0.01, **kw
):
return skopt_util.ipy_gen_trials(
flags,
prev_results_cb,
_suggest_x,
random_starts=opt_random_starts,
suggest_x_opts={
"kappa": opt_kappa,
"xi": opt_xi,
},
**kw
)
if __name__ == "__main__":
main()
| true | true |
f73e45e2c3aeed08f39ce9125f648b8297aeb96a | 2,822 | py | Python | tests/test_threaded_cached_property.py | bcho/cached-property | 53bc42fb32c95bd2fbd62083cdc4e91d698b8a90 | [
"BSD-3-Clause"
] | null | null | null | tests/test_threaded_cached_property.py | bcho/cached-property | 53bc42fb32c95bd2fbd62083cdc4e91d698b8a90 | [
"BSD-3-Clause"
] | null | null | null | tests/test_threaded_cached_property.py | bcho/cached-property | 53bc42fb32c95bd2fbd62083cdc4e91d698b8a90 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
test_threaded_cache_property.py
----------------------------------
Tests for `cached-property` module, threaded_cache_property.
"""
from time import sleep
from threading import Thread, Lock
import unittest
from cached_property import threaded_cached_property
class TestCachedProperty(unittest.TestCase):
def test_cached_property(self):
class Check(object):
def __init__(self):
self.total1 = 0
self.total2 = 0
@property
def add_control(self):
self.total1 += 1
return self.total1
@threaded_cached_property
def add_cached(self):
self.total2 += 1
return self.total2
c = Check()
# The control shows that we can continue to add 1.
self.assertEqual(c.add_control, 1)
self.assertEqual(c.add_control, 2)
# The cached version demonstrates how nothing new is added
self.assertEqual(c.add_cached, 1)
self.assertEqual(c.add_cached, 1)
def test_reset_cached_property(self):
class Check(object):
def __init__(self):
self.total = 0
@threaded_cached_property
def add_cached(self):
self.total += 1
return self.total
c = Check()
# Run standard cache assertion
self.assertEqual(c.add_cached, 1)
self.assertEqual(c.add_cached, 1)
# Reset the cache.
del c.add_cached
self.assertEqual(c.add_cached, 2)
self.assertEqual(c.add_cached, 2)
def test_none_cached_property(self):
class Check(object):
def __init__(self):
self.total = None
@threaded_cached_property
def add_cached(self):
return self.total
c = Check()
# Run standard cache assertion
self.assertEqual(c.add_cached, None)
class TestThreadingIssues(unittest.TestCase):
def test_threads(self):
""" How well does this implementation work with threads?"""
class Check(object):
def __init__(self):
self.total = 0
self.lock = Lock()
@threaded_cached_property
def add_cached(self):
sleep(1)
# Need to guard this since += isn't atomic.
with self.lock:
self.total += 1
return self.total
c = Check()
threads = []
for x in range(10):
thread = Thread(target=lambda: c.add_cached)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
self.assertEqual(c.add_cached, 1)
| 24.119658 | 67 | 0.552445 |
from time import sleep
from threading import Thread, Lock
import unittest
from cached_property import threaded_cached_property
class TestCachedProperty(unittest.TestCase):
def test_cached_property(self):
class Check(object):
def __init__(self):
self.total1 = 0
self.total2 = 0
@property
def add_control(self):
self.total1 += 1
return self.total1
@threaded_cached_property
def add_cached(self):
self.total2 += 1
return self.total2
c = Check()
self.assertEqual(c.add_control, 1)
self.assertEqual(c.add_control, 2)
self.assertEqual(c.add_cached, 1)
self.assertEqual(c.add_cached, 1)
def test_reset_cached_property(self):
class Check(object):
def __init__(self):
self.total = 0
@threaded_cached_property
def add_cached(self):
self.total += 1
return self.total
c = Check()
self.assertEqual(c.add_cached, 1)
self.assertEqual(c.add_cached, 1)
del c.add_cached
self.assertEqual(c.add_cached, 2)
self.assertEqual(c.add_cached, 2)
def test_none_cached_property(self):
class Check(object):
def __init__(self):
self.total = None
@threaded_cached_property
def add_cached(self):
return self.total
c = Check()
self.assertEqual(c.add_cached, None)
class TestThreadingIssues(unittest.TestCase):
def test_threads(self):
class Check(object):
def __init__(self):
self.total = 0
self.lock = Lock()
@threaded_cached_property
def add_cached(self):
sleep(1)
with self.lock:
self.total += 1
return self.total
c = Check()
threads = []
for x in range(10):
thread = Thread(target=lambda: c.add_cached)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
self.assertEqual(c.add_cached, 1)
| true | true |
f73e463b48557a70d80adcf15bb7df858e3402fc | 17,474 | py | Python | tests.py | kuharan/opts | 3966ee873aebeab46ebabfa6b85a9c66c56633f7 | [
"BSD-2-Clause"
] | null | null | null | tests.py | kuharan/opts | 3966ee873aebeab46ebabfa6b85a9c66c56633f7 | [
"BSD-2-Clause"
] | 2 | 2020-06-24T07:57:43.000Z | 2021-05-19T07:36:59.000Z | tests.py | kuharan/opts | 3966ee873aebeab46ebabfa6b85a9c66c56633f7 | [
"BSD-2-Clause"
] | 2 | 2019-02-26T14:34:01.000Z | 2019-05-31T03:44:52.000Z | #!/usr/bin/env python
# coding: utf-8
"""
tests
~~~~~
Provides the tests for opts.
:copyright: 2010 by Daniel Neuhäuser
:license: BSD, see LICENSE for details
"""
import unittest
import sys
from decimal import Decimal
from StringIO import StringIO
from opts import (Node, Option, BooleanOption, IntOption, FloatOption,
DecimalOption, MultipleOptions, Positional, IntPositional,
FloatPositional, DecimalPositional, Command, Parser)
def xrange(*args):
if len(args) == 1:
start, stop, step = 0, args[0], 1
elif len(args) == 2:
start, stop, step = args[0], args[1], 1
else:
start, stop, step = args
i = start
while i <= stop:
yield i
i += step
class TestCase(unittest.TestCase):
def assertContains(self, container, item):
if item not in container:
raise AssertionError('{0!r} not in {1!r}'.format(item, container))
def assertContainsAll(self, container, items):
for item in items:
self.assertContains(container, item)
class TestNode(TestCase):
def test_short_description_fallback(self):
n = Node()
self.assertEqual(n.short_description, u"No short description.")
def test_long_description_fallback(self):
n = Node()
self.assertEqual(n.long_description, u"No long description.")
def test_long_description_fallback_to_short(self):
n = Node(short_description=u"Foobar")
self.assertEqual(n.long_description, u"Foobar")
class TestOption(TestCase):
def test_valueerror_on_init(self):
self.assertRaises(ValueError, Option)
class TestBooleanOption(TestCase):
def test_evaluate(self):
o = BooleanOption(short="b")
p = Parser(options=dict(b=o))
self.assertEqual(p.evaluate([u'-b']), ({'b': True}, []))
o = BooleanOption(short="b", default=True)
p = Parser(options=dict(b=o))
self.assertEqual(p.evaluate(['-b']), ({'b': False}, []))
class TestNumberOptions(TestCase):
def test_intoption_evaluate(self):
self.make_test(xrange(-10, 10), IntOption(short='o'))
def test_floatoption_evaluate(self):
self.make_test(xrange(-10.0, 10.0, 0.5), FloatOption(short='o'))
def test_decimaloption_evaluate(self):
self.make_test(
xrange(Decimal('-10.0'), Decimal('10.0'), Decimal('0.5')),
DecimalOption(short='o')
)
def make_test(self, range, o):
p = Parser(options=dict(o=o))
for i in range:
self.assertEqual(p.evaluate([u'-o', unicode(i)]), ({'o': i}, []))
class TestMultipleOptions(TestCase):
def test_evaluate_no_quotes(self):
o = MultipleOptions(short='o')
p = Parser(options=dict(o=o))
self.assertEqual(
p.evaluate([u'-o', u'foo,bar,baz']),
({'o': [u'foo', u'bar', u'baz']}, [])
)
def test_evaluate_with_quotes(self):
o = MultipleOptions(short='o')
p = Parser(options=dict(o=o))
self.assertEqual(
p.evaluate([u'-o', u'foo,"bar,baz"']),
({'o': [u'foo', u'bar,baz']}, [])
)
self.assertEqual(
p.evaluate([u'-o', u'"foo,bar",baz']),
({'o': [u'foo,bar', u'baz']}, [])
)
class TestPositional(TestCase):
def test_evaluate(self):
p = Parser(positionals=[Positional('foo')])
self.assertEquals(p.evaluate([u'spam']), ({}, [u'spam']))
class TestNumberPositionals(TestCase):
def test_intpositional_evaluate(self):
self.make_test(xrange(10), IntPositional('foo'))
def test_floatpositional_evaluate(self):
self.make_test(xrange(10, 0.5), FloatPositional('foo'))
def test_decimalpositional_evaluate(self):
self.make_test(
xrange(Decimal('0'), Decimal('10.0'), Decimal('0.5')),
DecimalPositional('foo')
)
def make_test(self, range, p):
parser = Parser(positionals=[p])
for i in range:
self.assertEqual(parser.evaluate([unicode(i)]), ({}, [i]))
class TestCommand(TestCase):
def test_remaining_arguments(self):
c = Command(options={'a': Option('a')})
p = Parser(commands=dict(c=c))
self.assertEqual(
p.evaluate([u'c', u'foo']),
({'c': ({}, [u'foo'])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'-a', u'foo']),
({'c': ({'a': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'-a', u'foo', u'bar']),
({u'c': ({'a': u'foo'}, [u'bar'])}, [])
)
def test_options(self):
class TestDeclarative(Command):
spam = Option('a', 'asomething')
eggs = Option('b', 'bsomething')
a = TestDeclarative()
b = Command(options={
'spam': Option('a', 'asomething'),
'eggs': Option('b', 'bsomething')})
for c in [a, b]:
p = Parser(commands=dict(c=c))
self.assertEqual(
p.evaluate([u'c', u'-a', u'foo']),
({'c': ({'spam': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'--asomething', u'foo']),
({'c': ({'spam': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'-b', u'foo']),
({'c': ({u'eggs': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'--bsomething', u'foo']),
({'c': ({u'eggs': u'foo'}, [])}, [])
)
def test_commands(self):
class TestDeclarative(Command):
spam = Command()
eggs = Command()
a = TestDeclarative()
b = Command(commands={
'spam': Command(),
'eggs': Command()})
cp = [u'script_name']
for c in [a, b]:
p = Parser(commands=dict(c=c))
self.assertEqual(
p.evaluate([u'c', u'spam']),
({'c': ({u'spam': ({}, [])}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'eggs']),
({'c': ({'eggs': ({}, [])}, [])}, [])
)
def test_abbreviations(self):
c = Command(
options={
'stack': Option(long='stack'),
'stash': Option(long='stash')},
commands={
'stack': Command(),
'stash': Command()})
p = Parser(commands=dict(c=c))
cp = [u'script_name']
for s in [u's', u'st', u'sta']:
cmd = [u'c', s]
result = ({'c': ({}, [s])}, [])
self.assertEqual(p.evaluate(cmd), result)
self.assertEqual(p.evaluate(cmd), result)
self.assertEqual(p.evaluate(cmd), result)
self.assertEqual(
p.evaluate([u'c', u'stac']),
({'c': ({u'stack': ({}, [])}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'stas']),
({'c': ({u'stash': ({}, [])}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'--stac', u'foo']),
({'c': ({u'stack': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'--stas', u'foo']),
({'c': ({u'stash': u'foo'}, [])}, [])
)
def test_disallow_abbreviated_commands(self):
class NewCommand(Command):
allow_abbreviated_commands = False
c = NewCommand(commands={
'foo': Command()
})
p = Parser(commands=dict(c=c))
self.assertEqual(p.evaluate([u'c', u'f']), ({'c': ({}, [u'f'])}, []))
def test_apply_defaults(self):
class FooParser(Parser):
activate = BooleanOption('a')
foo = Command(
options={
'spam': Option('a'),
'eggs': Option('b')
}
)
p = FooParser()
p.apply_defaults({
'activate': 'huhu',
'foo': {
'spam': 'bla',
'eggs': 'blubb'
}
})
self.assertEquals(p.options['activate'].default, 'huhu')
self.assertEquals(p.commands['foo'].options['spam'].default, 'bla')
self.assertEquals(p.commands['foo'].options['eggs'].default, 'blubb')
def test_getattr(self):
p = Parser(
options={
'activate': Option('a')
},
commands={
'foo': Command(options={
'spam': Option('b'),
'eggs': Option('c')
})
}
)
p.activate
p.foo
p.foo.spam
p.foo.eggs
def test_dynamically_adding_nodes(self):
p = Parser()
p.commands['foo'] = Command()
p.commands['foo'].options['a'] = BooleanOption('a')
p.options['bar'] = Option('b')
self.assertEquals(p.evaluate([u'-b', u'spam']), ({'bar': u'spam'}, []))
self.assertEquals(
p.evaluate([u'foo']),
({'foo': ({'a': False}, [])}, [])
)
self.assertEquals(
p.evaluate([u'foo', u'-a']),
({'foo': ({'a': True}, [])}, [])
)
class TestParser(TestCase):
def test_default_evaluate_arguments(self):
old_argv = sys.argv
enc = sys.stdin.encoding or sys.getdefaultencoding()
sys.argv = [s.encode(enc) for s in [u'script_name', u'foo', u'bar']]
p = Parser()
self.assertEqual(p.evaluate(), ({}, [u'foo', u'bar']))
sys.argv = old_argv
class OutputTest(TestCase):
def setUp(self):
self.out_file = StringIO()
self._old_argv = sys.argv
sys.argv = ['script']
def tearDown(self):
self.out_file = StringIO()
sys.argv = self._old_argv
class TestParserOutput(OutputTest):
def test_alternative_commands(self):
p = Parser(
commands={
'stack': Command(),
'stash': Command(),
},
out_file=self.out_file,
takes_arguments=False
)
for cmd in [u's', u'st', u'sta']:
self.assertRaises(SystemExit, p.evaluate, [cmd])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [commands]')
self.assertContains(
output,
u'command "{0}" does not exist, did you mean?'.format(cmd)
)
self.assertContains(output, u'stack')
self.assertContains(output, u'stash')
def test_alternative_options(self):
p = Parser(
options={
'stack': Option(long='stack'),
'stash': Option(long='stash')
},
out_file=self.out_file
)
for option in [u'--s', u'--st', u'--sta']:
self.assertRaises(SystemExit, p.evaluate, [option])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [options]')
self.assertContains(
output,
u'option "{0}" does not exist, did you mean?'.format(option)
)
self.assertContains(output, u'--stack')
self.assertContains(output, u'--stash')
def test_nonexisting_command(self):
p = Parser(
out_file=self.out_file,
takes_arguments=False
)
self.assertRaises(SystemExit, p.evaluate, [u'foo'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script')
self.assertContains(output, u'command "foo" does not exist')
def test_nonexisting_long_option(self):
p = Parser(out_file=self.out_file)
self.assertRaises(SystemExit, p.evaluate, [u'--foo'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script')
self.assertContains(output, u'option "--foo" does not exist')
def test_nonexisting_short_option(self):
p = Parser(out_file=self.out_file)
self.assertRaises(SystemExit, p.evaluate, [u'-f'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script')
self.assertContains(output, u'option "-f" does not exist')
class TestHelp(OutputTest):
def test_commands(self):
p = Parser(
commands={
'foo': Command(short_description=u'foo description'),
'bar': Command(short_description=u'bar description')
},
description=u'The script description',
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContainsAll(output, [
u'usage: script [commands]',
p.long_description,
u'Commands:',
u' foo',
p.commands['foo'].short_description,
u' bar',
p.commands['bar'].short_description
])
def test_options(self):
p = Parser(
options={
'foo': Option('f'),
'bar': Option(long='bar'),
'baz': Option('b', 'baz')
},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContainsAll(output, [
u'usage: script [options]',
u'Options:',
u' -f',
u' --bar',
u' -b --baz'
])
def test_positional_arguments(self):
p = Parser(
positionals=[
Positional(u'foo'),
Positional(u'bar', short_description=u'something')
],
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContainsAll(output, [
u'usage: script foo bar',
u'Positional arguments:',
u' foo',
u'No short description.',
u' bar',
u'something'
])
def test_commands_and_options(self):
p = Parser(
commands={
'spam': Command(),
'eggs': Command()
},
options={
'foo': Option('f'),
'bar': Option('b')
},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContainsAll(output, [
u'usage: script [options] [commands]',
u'Commands:',
u' spam',
u' eggs',
u'Options:',
u' -f',
u' -b'
])
class TestUsage(OutputTest):
def test_only_commands(self):
p = Parser(
commands={'foo': Command()},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [commands]')
def test_only_options(self):
p = Parser(
options={'foo': Option('f')},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [options]')
def test_commands_and_options(self):
p = Parser(
options={'foo': Option('f')},
commands={'bar': Command()},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [options] [commands]')
def test_positionals(self):
p = Parser(
positionals=[
Positional('a'),
Positional('b')
],
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script a b')
def test_all(self):
p = Parser(
options={'foo': Option('f')},
commands={'bar': Command()},
positionals=[Positional('baz')],
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [options] [commands] baz')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestNode))
suite.addTest(unittest.makeSuite(TestOption))
suite.addTest(unittest.makeSuite(TestBooleanOption))
suite.addTest(unittest.makeSuite(TestNumberOptions))
suite.addTest(unittest.makeSuite(TestMultipleOptions))
suite.addTest(unittest.makeSuite(TestPositional))
suite.addTest(unittest.makeSuite(TestNumberPositionals))
suite.addTest(unittest.makeSuite(TestCommand))
suite.addTest(unittest.makeSuite(TestParser))
suite.addTest(unittest.makeSuite(TestParserOutput))
suite.addTest(unittest.makeSuite(TestHelp))
suite.addTest(unittest.makeSuite(TestUsage))
return suite
if __name__ == "__main__":
unittest.main(defaultTest='suite')
| 33.094697 | 79 | 0.521289 |
import unittest
import sys
from decimal import Decimal
from StringIO import StringIO
from opts import (Node, Option, BooleanOption, IntOption, FloatOption,
DecimalOption, MultipleOptions, Positional, IntPositional,
FloatPositional, DecimalPositional, Command, Parser)
def xrange(*args):
if len(args) == 1:
start, stop, step = 0, args[0], 1
elif len(args) == 2:
start, stop, step = args[0], args[1], 1
else:
start, stop, step = args
i = start
while i <= stop:
yield i
i += step
class TestCase(unittest.TestCase):
def assertContains(self, container, item):
if item not in container:
raise AssertionError('{0!r} not in {1!r}'.format(item, container))
def assertContainsAll(self, container, items):
for item in items:
self.assertContains(container, item)
class TestNode(TestCase):
def test_short_description_fallback(self):
n = Node()
self.assertEqual(n.short_description, u"No short description.")
def test_long_description_fallback(self):
n = Node()
self.assertEqual(n.long_description, u"No long description.")
def test_long_description_fallback_to_short(self):
n = Node(short_description=u"Foobar")
self.assertEqual(n.long_description, u"Foobar")
class TestOption(TestCase):
def test_valueerror_on_init(self):
self.assertRaises(ValueError, Option)
class TestBooleanOption(TestCase):
def test_evaluate(self):
o = BooleanOption(short="b")
p = Parser(options=dict(b=o))
self.assertEqual(p.evaluate([u'-b']), ({'b': True}, []))
o = BooleanOption(short="b", default=True)
p = Parser(options=dict(b=o))
self.assertEqual(p.evaluate(['-b']), ({'b': False}, []))
class TestNumberOptions(TestCase):
def test_intoption_evaluate(self):
self.make_test(xrange(-10, 10), IntOption(short='o'))
def test_floatoption_evaluate(self):
self.make_test(xrange(-10.0, 10.0, 0.5), FloatOption(short='o'))
def test_decimaloption_evaluate(self):
self.make_test(
xrange(Decimal('-10.0'), Decimal('10.0'), Decimal('0.5')),
DecimalOption(short='o')
)
def make_test(self, range, o):
p = Parser(options=dict(o=o))
for i in range:
self.assertEqual(p.evaluate([u'-o', unicode(i)]), ({'o': i}, []))
class TestMultipleOptions(TestCase):
    """Comma-separated multi-value options, with optional quoting."""

    def test_evaluate_no_quotes(self):
        """A plain comma list splits on every comma."""
        opt = MultipleOptions(short='o')
        parser = Parser(options=dict(o=opt))
        self.assertEqual(
            parser.evaluate([u'-o', u'foo,bar,baz']),
            ({'o': [u'foo', u'bar', u'baz']}, [])
        )

    def test_evaluate_with_quotes(self):
        """Quoted segments keep their embedded commas."""
        opt = MultipleOptions(short='o')
        parser = Parser(options=dict(o=opt))
        self.assertEqual(
            parser.evaluate([u'-o', u'foo,"bar,baz"']),
            ({'o': [u'foo', u'bar,baz']}, [])
        )
        self.assertEqual(
            parser.evaluate([u'-o', u'"foo,bar",baz']),
            ({'o': [u'foo,bar', u'baz']}, [])
        )
class TestPositional(TestCase):
    """A plain positional is passed through as a remaining argument."""

    def test_evaluate(self):
        p = Parser(positionals=[Positional('foo')])
        # CONSISTENCY: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(p.evaluate([u'spam']), ({}, [u'spam']))
class TestNumberPositionals(TestCase):
    """Typed positionals (int/float/Decimal) parse their argument."""

    def test_intpositional_evaluate(self):
        self.make_test(xrange(10), IntPositional('foo'))

    def test_floatpositional_evaluate(self):
        # BUG FIX: this used to be ``xrange(10, 0.5)`` -- start above stop --
        # which yields nothing, so FloatPositional was never exercised.
        # Iterate 0..10 in 0.5 steps, mirroring the Decimal test below.
        self.make_test(xrange(0, 10, 0.5), FloatPositional('foo'))

    def test_decimalpositional_evaluate(self):
        self.make_test(
            xrange(Decimal('0'), Decimal('10.0'), Decimal('0.5')),
            DecimalPositional('foo')
        )

    def make_test(self, range, p):
        """Every value of *range* must round-trip through positional *p*."""
        parser = Parser(positionals=[p])
        for i in range:
            self.assertEqual(parser.evaluate([unicode(i)]), ({}, [i]))
class TestCommand(TestCase):
    """Tests for ``Command``: remaining-argument pass-through, declarative and
    imperative definition of options/sub-commands, abbreviation handling,
    defaults, attribute access, and dynamically added nodes."""

    def test_remaining_arguments(self):
        c = Command(options={'a': Option('a')})
        p = Parser(commands=dict(c=c))
        self.assertEqual(
            p.evaluate([u'c', u'foo']),
            ({'c': ({}, [u'foo'])}, [])
        )
        self.assertEqual(
            p.evaluate([u'c', u'-a', u'foo']),
            ({'c': ({'a': u'foo'}, [])}, [])
        )
        self.assertEqual(
            p.evaluate([u'c', u'-a', u'foo', u'bar']),
            ({u'c': ({'a': u'foo'}, [u'bar'])}, [])
        )

    def test_options(self):
        # Declarative (class attributes) and imperative (options= dict)
        # definitions must behave identically.
        class TestDeclarative(Command):
            spam = Option('a', 'asomething')
            eggs = Option('b', 'bsomething')
        a = TestDeclarative()
        b = Command(options={
            'spam': Option('a', 'asomething'),
            'eggs': Option('b', 'bsomething')})
        for c in [a, b]:
            p = Parser(commands=dict(c=c))
            self.assertEqual(
                p.evaluate([u'c', u'-a', u'foo']),
                ({'c': ({'spam': u'foo'}, [])}, [])
            )
            self.assertEqual(
                p.evaluate([u'c', u'--asomething', u'foo']),
                ({'c': ({'spam': u'foo'}, [])}, [])
            )
            self.assertEqual(
                p.evaluate([u'c', u'-b', u'foo']),
                ({'c': ({u'eggs': u'foo'}, [])}, [])
            )
            self.assertEqual(
                p.evaluate([u'c', u'--bsomething', u'foo']),
                ({'c': ({u'eggs': u'foo'}, [])}, [])
            )

    def test_commands(self):
        # Same declarative/imperative equivalence for sub-commands.
        class TestDeclarative(Command):
            spam = Command()
            eggs = Command()
        a = TestDeclarative()
        b = Command(commands={
            'spam': Command(),
            'eggs': Command()})
        # CLEANUP: removed the unused ``cp = [u'script_name']`` local.
        for c in [a, b]:
            p = Parser(commands=dict(c=c))
            self.assertEqual(
                p.evaluate([u'c', u'spam']),
                ({'c': ({u'spam': ({}, [])}, [])}, [])
            )
            self.assertEqual(
                p.evaluate([u'c', u'eggs']),
                ({'c': ({'eggs': ({}, [])}, [])}, [])
            )

    def test_abbreviations(self):
        c = Command(
            options={
                'stack': Option(long='stack'),
                'stash': Option(long='stash')},
            commands={
                'stack': Command(),
                'stash': Command()})
        p = Parser(commands=dict(c=c))
        # CLEANUP: removed the unused ``cp = [u'script_name']`` local.
        for s in [u's', u'st', u'sta']:
            cmd = [u'c', s]
            result = ({'c': ({}, [s])}, [])
            # Evaluated three times on purpose: the result must be stable
            # across repeated calls on the same parser instance.
            self.assertEqual(p.evaluate(cmd), result)
            self.assertEqual(p.evaluate(cmd), result)
            self.assertEqual(p.evaluate(cmd), result)
        self.assertEqual(
            p.evaluate([u'c', u'stac']),
            ({'c': ({u'stack': ({}, [])}, [])}, [])
        )
        self.assertEqual(
            p.evaluate([u'c', u'stas']),
            ({'c': ({u'stash': ({}, [])}, [])}, [])
        )
        self.assertEqual(
            p.evaluate([u'c', u'--stac', u'foo']),
            ({'c': ({u'stack': u'foo'}, [])}, [])
        )
        self.assertEqual(
            p.evaluate([u'c', u'--stas', u'foo']),
            ({'c': ({u'stash': u'foo'}, [])}, [])
        )

    def test_disallow_abbreviated_commands(self):
        class NewCommand(Command):
            allow_abbreviated_commands = False
        c = NewCommand(commands={
            'foo': Command()
        })
        p = Parser(commands=dict(c=c))
        # With abbreviation disabled, 'f' stays a plain argument.
        self.assertEqual(p.evaluate([u'c', u'f']), ({'c': ({}, [u'f'])}, []))

    def test_apply_defaults(self):
        class FooParser(Parser):
            activate = BooleanOption('a')
            foo = Command(
                options={
                    'spam': Option('a'),
                    'eggs': Option('b')
                }
            )
        p = FooParser()
        p.apply_defaults({
            'activate': 'huhu',
            'foo': {
                'spam': 'bla',
                'eggs': 'blubb'
            }
        })
        # CONSISTENCY: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(p.options['activate'].default, 'huhu')
        self.assertEqual(p.commands['foo'].options['spam'].default, 'bla')
        self.assertEqual(p.commands['foo'].options['eggs'].default, 'blubb')

    def test_getattr(self):
        p = Parser(
            options={
                'activate': Option('a')
            },
            commands={
                'foo': Command(options={
                    'spam': Option('b'),
                    'eggs': Option('c')
                })
            }
        )
        # Smoke test: attribute-style access to nodes must not raise.
        p.activate
        p.foo
        p.foo.spam
        p.foo.eggs

    def test_dynamically_adding_nodes(self):
        p = Parser()
        p.commands['foo'] = Command()
        p.commands['foo'].options['a'] = BooleanOption('a')
        p.options['bar'] = Option('b')
        # CONSISTENCY: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(p.evaluate([u'-b', u'spam']), ({'bar': u'spam'}, []))
        self.assertEqual(
            p.evaluate([u'foo']),
            ({'foo': ({'a': False}, [])}, [])
        )
        self.assertEqual(
            p.evaluate([u'foo', u'-a']),
            ({'foo': ({'a': True}, [])}, [])
        )
class TestParser(TestCase):
    """Parser falls back to ``sys.argv[1:]`` when no arguments are given."""

    def test_default_evaluate_arguments(self):
        old_argv = sys.argv
        enc = sys.stdin.encoding or sys.getdefaultencoding()
        sys.argv = [s.encode(enc) for s in [u'script_name', u'foo', u'bar']]
        # ROBUSTNESS FIX: restore sys.argv even when the assertion fails,
        # otherwise a failure here leaks a patched argv into later tests.
        try:
            p = Parser()
            self.assertEqual(p.evaluate(), ({}, [u'foo', u'bar']))
        finally:
            sys.argv = old_argv
class OutputTest(TestCase):
    """Base class for tests that capture parser output in a StringIO buffer
    while pretending the script is called 'script'."""

    def setUp(self):
        self.out_file = StringIO()
        self._old_argv = sys.argv
        sys.argv = ['script']

    def tearDown(self):
        # BUG FIX (minor): tearDown used to replace the buffer with a fresh
        # StringIO -- pointless after the test; close it instead.
        self.out_file.close()
        sys.argv = self._old_argv
class TestParserOutput(OutputTest):
    """Error output of the parser for ambiguous and unknown commands/options.

    Every case must exit via SystemExit and print a usage line plus a
    specific error message to ``self.out_file``.
    """

    def test_alternative_commands(self):
        """An ambiguous command abbreviation lists all matching commands."""
        p = Parser(
            commands={
                'stack': Command(),
                'stash': Command(),
            },
            out_file=self.out_file,
            takes_arguments=False
        )
        for cmd in [u's', u'st', u'sta']:
            self.assertRaises(SystemExit, p.evaluate, [cmd])
            output = self.out_file.getvalue()
            self.assertContains(output, u'usage: script [commands]')
            self.assertContains(
                output,
                u'command "{0}" does not exist, did you mean?'.format(cmd)
            )
            self.assertContains(output, u'stack')
            self.assertContains(output, u'stash')

    def test_alternative_options(self):
        """An ambiguous long-option abbreviation lists all candidates."""
        p = Parser(
            options={
                'stack': Option(long='stack'),
                'stash': Option(long='stash')
            },
            out_file=self.out_file
        )
        for option in [u'--s', u'--st', u'--sta']:
            self.assertRaises(SystemExit, p.evaluate, [option])
            output = self.out_file.getvalue()
            self.assertContains(output, u'usage: script [options]')
            self.assertContains(
                output,
                u'option "{0}" does not exist, did you mean?'.format(option)
            )
            self.assertContains(output, u'--stack')
            self.assertContains(output, u'--stash')

    def test_nonexisting_command(self):
        """A completely unknown command reports an error and exits."""
        p = Parser(
            out_file=self.out_file,
            takes_arguments=False
        )
        self.assertRaises(SystemExit, p.evaluate, [u'foo'])
        output = self.out_file.getvalue()
        self.assertContains(output, u'usage: script')
        self.assertContains(output, u'command "foo" does not exist')

    def test_nonexisting_long_option(self):
        """An unknown long option reports an error and exits."""
        p = Parser(out_file=self.out_file)
        self.assertRaises(SystemExit, p.evaluate, [u'--foo'])
        output = self.out_file.getvalue()
        self.assertContains(output, u'usage: script')
        self.assertContains(output, u'option "--foo" does not exist')

    def test_nonexisting_short_option(self):
        """An unknown short option reports an error and exits."""
        p = Parser(out_file=self.out_file)
        self.assertRaises(SystemExit, p.evaluate, [u'-f'])
        output = self.out_file.getvalue()
        self.assertContains(output, u'usage: script')
        self.assertContains(output, u'option "-f" does not exist')
class TestHelp(OutputTest):
    """Content of the generated help screen: each configured node must show
    up in its section with its (fallback) short description."""

    def test_commands(self):
        """Commands are listed with their short descriptions."""
        p = Parser(
            commands={
                'foo': Command(short_description=u'foo description'),
                'bar': Command(short_description=u'bar description')
            },
            description=u'The script description',
            out_file=self.out_file
        )
        self.assertRaises(SystemExit, p.evaluate, [u'help'])
        output = self.out_file.getvalue()
        self.assertContainsAll(output, [
            u'usage: script [commands]',
            p.long_description,
            u'Commands:',
            u' foo',
            p.commands['foo'].short_description,
            u' bar',
            p.commands['bar'].short_description
        ])

    def test_options(self):
        """Options are listed with their short and/or long forms."""
        p = Parser(
            options={
                'foo': Option('f'),
                'bar': Option(long='bar'),
                'baz': Option('b', 'baz')
            },
            out_file=self.out_file
        )
        self.assertRaises(SystemExit, p.evaluate, [u'help'])
        output = self.out_file.getvalue()
        self.assertContainsAll(output, [
            u'usage: script [options]',
            u'Options:',
            u' -f',
            u' --bar',
            u' -b --baz'
        ])

    def test_positional_arguments(self):
        """Positionals are listed with descriptions (or the fallback)."""
        p = Parser(
            positionals=[
                Positional(u'foo'),
                Positional(u'bar', short_description=u'something')
            ],
            out_file=self.out_file
        )
        self.assertRaises(SystemExit, p.evaluate, [u'help'])
        output = self.out_file.getvalue()
        self.assertContainsAll(output, [
            u'usage: script foo bar',
            u'Positional arguments:',
            u' foo',
            u'No short description.',
            u' bar',
            u'something'
        ])

    def test_commands_and_options(self):
        """Both sections appear when commands and options are configured."""
        p = Parser(
            commands={
                'spam': Command(),
                'eggs': Command()
            },
            options={
                'foo': Option('f'),
                'bar': Option('b')
            },
            out_file=self.out_file
        )
        self.assertRaises(SystemExit, p.evaluate, [u'help'])
        output = self.out_file.getvalue()
        self.assertContainsAll(output, [
            u'usage: script [options] [commands]',
            u'Commands:',
            u' spam',
            u' eggs',
            u'Options:',
            u' -f',
            u' -b'
        ])
class TestUsage(OutputTest):
    """The usage line reflects which kinds of nodes the parser has:
    '[options]', '[commands]' and the positional names, in that order."""

    def test_only_commands(self):
        p = Parser(
            commands={'foo': Command()},
            out_file=self.out_file
        )
        self.assertRaises(SystemExit, p.evaluate, [u'help'])
        output = self.out_file.getvalue()
        self.assertContains(output, u'usage: script [commands]')

    def test_only_options(self):
        p = Parser(
            options={'foo': Option('f')},
            out_file=self.out_file
        )
        self.assertRaises(SystemExit, p.evaluate, [u'help'])
        output = self.out_file.getvalue()
        self.assertContains(output, u'usage: script [options]')

    def test_commands_and_options(self):
        p = Parser(
            options={'foo': Option('f')},
            commands={'bar': Command()},
            out_file=self.out_file
        )
        self.assertRaises(SystemExit, p.evaluate, [u'help'])
        output = self.out_file.getvalue()
        self.assertContains(output, u'usage: script [options] [commands]')

    def test_positionals(self):
        p = Parser(
            positionals=[
                Positional('a'),
                Positional('b')
            ],
            out_file=self.out_file
        )
        self.assertRaises(SystemExit, p.evaluate, [u'help'])
        output = self.out_file.getvalue()
        self.assertContains(output, u'usage: script a b')

    def test_all(self):
        # All three kinds at once: options, commands, then positionals.
        p = Parser(
            options={'foo': Option('f')},
            commands={'bar': Command()},
            positionals=[Positional('baz')],
            out_file=self.out_file
        )
        self.assertRaises(SystemExit, p.evaluate, [u'help'])
        output = self.out_file.getvalue()
        self.assertContains(output, u'usage: script [options] [commands] baz')
def suite():
    """Assemble the full test suite, preserving the declaration order."""
    case_classes = [
        TestNode,
        TestOption,
        TestBooleanOption,
        TestNumberOptions,
        TestMultipleOptions,
        TestPositional,
        TestNumberPositionals,
        TestCommand,
        TestParser,
        TestParserOutput,
        TestHelp,
        TestUsage,
    ]
    combined = unittest.TestSuite()
    for case_class in case_classes:
        combined.addTest(unittest.makeSuite(case_class))
    return combined
if __name__ == "__main__":
    # Run the explicitly assembled suite (not test discovery) when the file
    # is executed as a script.
    unittest.main(defaultTest='suite')
| true | true |
f73e489ea8762999852e1bf87637ac75a346f0ba | 803 | py | Python | src/octonag/jira_status.py | taliamax/OctoNag | 51e8cb139008f52f17aa6580a02e8553d17f821f | [
"Apache-2.0"
] | 1 | 2019-12-10T21:25:14.000Z | 2019-12-10T21:25:14.000Z | src/octonag/jira_status.py | taliamax/OctoNag | 51e8cb139008f52f17aa6580a02e8553d17f821f | [
"Apache-2.0"
] | 2 | 2020-05-06T03:06:08.000Z | 2021-06-02T00:17:58.000Z | src/octonag/jira_status.py | taliamax/OctoNag | 51e8cb139008f52f17aa6580a02e8553d17f821f | [
"Apache-2.0"
] | 1 | 2020-01-21T20:04:34.000Z | 2020-01-21T20:04:34.000Z | from .configuration import with_credentials
from .configuration import jira_url
from jira import JIRA
from jira.exceptions import JIRAError
import logging
@with_credentials(service='Jira')
def in_review(issue_id, _usr, _pwd):
    """Check whether the Jira issue *issue_id* currently has status 'Review'.

    :param issue_id: Jira issue key, e.g. 'PROJ-123'
    :param _usr: Jira username injected by ``with_credentials``
    :param _pwd: Jira password injected by ``with_credentials``
    :return: True/False for the status check, or None when credentials are
        missing or the Jira lookup fails.
    """
    if _usr is None or _pwd is None:
        logging.error('Jira username or password unset.')
        return None
    jira = None
    try:
        jira = JIRA(
            jira_url,
            auth=(_usr, _pwd)
        )
        issue = jira.issue(issue_id)
        # Status objects are compared via their string form.
        return str(issue.fields.status) == 'Review'
    except JIRAError:
        # CONSISTENCY FIX: report failures through logging like the
        # missing-credentials path above, instead of bare print().
        if jira is None:
            logging.error('Could not connect to Jira.')
        else:
            logging.error('Could not check Jira ticket status on %s.', issue_id)
        return None
| 26.766667 | 73 | 0.607721 | from .configuration import with_credentials
from .configuration import jira_url
from jira import JIRA
from jira.exceptions import JIRAError
import logging
@with_credentials(service='Jira')
def in_review(issue_id, _usr, _pwd):
    """Check whether the Jira issue *issue_id* currently has status 'Review'.

    :param issue_id: Jira issue key, e.g. 'PROJ-123'
    :param _usr: Jira username injected by ``with_credentials``
    :param _pwd: Jira password injected by ``with_credentials``
    :return: True/False for the status check, or None when credentials are
        missing or the Jira lookup fails.
    """
    if _usr is None or _pwd is None:
        logging.error('Jira username or password unset.')
        return None
    jira = None
    try:
        jira = JIRA(
            jira_url,
            auth=(_usr, _pwd)
        )
        issue = jira.issue(issue_id)
        # Status objects are compared via their string form.
        return str(issue.fields.status) == 'Review'
    except JIRAError:
        # CONSISTENCY FIX: report failures through logging like the
        # missing-credentials path above, instead of bare print().
        if jira is None:
            logging.error('Could not connect to Jira.')
        else:
            logging.error('Could not check Jira ticket status on %s.', issue_id)
        return None
| true | true |
f73e48adcd654c16c1853596f0b84ea2b5d54e3f | 10,263 | py | Python | castepxbin/pdos.py | zhubonan/castepxbin | 24b875cf44b83d5eac75b52cf45e378a3361e90e | [
"MIT"
] | 4 | 2021-10-08T13:24:26.000Z | 2022-02-19T08:54:31.000Z | castepxbin/pdos.py | zhubonan/castepxbin | 24b875cf44b83d5eac75b52cf45e378a3361e90e | [
"MIT"
] | 3 | 2021-01-06T22:28:52.000Z | 2021-10-20T18:20:55.000Z | castepxbin/pdos.py | zhubonan/castepxbin | 24b875cf44b83d5eac75b52cf45e378a3361e90e | [
"MIT"
] | null | null | null | """
Reader module for CASTEP pdos_bin
Written based on the example `pdos_bin.f90` file in open-source OptaDos code
"""
from enum import Enum, unique
import numpy as np
from scipy.io import FortranFile
@unique
class SpinEnum(Enum):
    """Electron spin channel.

    ``up`` maps to +1 and ``down`` to -1; the numeric dunders expose the
    value so instances can be used directly in arithmetic contexts.
    """
    up = 1
    down = -1

    def __int__(self):
        return self.value

    def __float__(self):
        return float(self.value)

    def __str__(self):
        return str(self.value)
@unique
class OrbitalType(Enum):
    """Angular-momentum channel of an orbital.

    The member value is the azimuthal quantum number l (s=0 ... f=3).
    """
    s, p, d, f = 0, 1, 2, 3

    def __str__(self):
        return self.name
@unique
class OrbitalEnum(Enum):
    """Individual orbitals; member values are the labels CASTEP reports."""
    s = "S"
    px = "Px"
    py = "Py"
    pz = "Pz"
    dxy = "Dxy"
    dyz = "Dzy"
    dz2 = "Dzz"
    dxz = "Dzx"
    dx2 = "Dxx-yy"
    f_xxx = "Fxxx"
    f_yyy = "Fyyy"
    f_zzz = "Fzzz"
    f_xyz = "Fxyz"
    f_z_xx_yy = "Fz(xx-yy)"
    f_y_zz_xx = "Fy(zz-xx)"
    f_x_yy_zz = "Fx(yy-zz)"

    def __int__(self):
        # NOTE(review): the value is a string, so int(member) raises
        # TypeError; kept as-is for interface parity with SpinEnum.
        return self.value

    def __str__(self):
        return self.name

    @property
    def orbital_type(self):
        """The :class:`OrbitalType` channel, derived from the first letter
        of the member name (s/p/d/f)."""
        return OrbitalType[self.name[0]]
def read_pdos_bin(filename, endian='big'):
    """
    Read the pdos_bin file generated by CASTEP Spectral task.

    Args:
        filename (str): name of the file to be read
        endian (str): byte order of the records, ``'big'`` (CASTEP default)
            or ``'little'``.

    Returns:
        A dictionary of the data that have been read.
        the weights of each orbital in stored in the 'pdos_weights' array
        with dimension (n_orbital, n_max_eign, n_kpoints, n_spin)
    """
    # BUG FIX: the little-endian branch previously also produced '>' and the
    # header/record dtypes below were hard-coded big-endian, so
    # endian='little' could never work. All dtypes now derive from esymbol.
    esymbol = '>' if endian.upper() == 'BIG' else '<'
    dint = np.dtype(esymbol + 'i4')
    ddouble = np.dtype(esymbol + 'f8')
    dch80 = np.dtype(esymbol + 'a80')
    diarray = lambda x: '{}({},)i4'.format(esymbol, x)
    ddarray = lambda x: '{}({},)f8'.format(esymbol, x)

    with FortranFile(filename, header_dtype=np.dtype(esymbol + 'u4')) as fhandle:
        fversion = fhandle.read_record(ddouble)[0]
        fheader = fhandle.read_record(dch80)[0].decode()
        num_kpoints = fhandle.read_record(dint)[0]
        num_spins = fhandle.read_record(dint)[0]
        num_popn_orb = fhandle.read_record(dint)[0]
        max_eignenv = fhandle.read_record(dint)[0]
        # Per-orbital metadata: one entry per population-analysis orbital.
        species = fhandle.read_record(diarray(num_popn_orb))
        ion = fhandle.read_record(diarray(num_popn_orb))
        am_channel = fhandle.read_record(diarray(num_popn_orb))
        # Storage for the weights and k-point metadata.
        pdos_weights = np.zeros(
            (num_popn_orb, max_eignenv, num_kpoints, num_spins),
            dtype=float)
        kpoint_positions = np.zeros((num_kpoints, 3), dtype=float)
        num_eigenvalues = np.zeros(num_spins, dtype=int)
        for nk in range(num_kpoints):
            # Record layout: (k-point index, fractional coordinates).
            _, kpoint_positions[nk, :] = fhandle.read_record(
                esymbol + 'i4', esymbol + '(3,)f8')
            for ns in range(num_spins):
                _ = fhandle.read_record(dint)  # spin index record (unused)
                num_eigenvalues[ns] = fhandle.read_record(dint)
                for nb in range(num_eigenvalues[ns]):
                    pdos_weights[:, nb, nk, ns] = fhandle.read_record(
                        ddarray(num_popn_orb))
    # CLEANUP: the original dict listed 'pdos_weights' twice.
    output = {
        'fversion': fversion,
        'fheader': fheader,
        'num_kpoints': num_kpoints,
        'num_spins': num_spins,
        'num_popn_orb': num_popn_orb,
        'max_eigenenv': max_eignenv,
        'species': species,
        'ion': ion,
        'am_channel': am_channel,
        'kpoints_positions': kpoint_positions,
        'num_eigenvalues': num_eigenvalues,
        'pdos_weights': pdos_weights,
    }
    return output
def reorder_pdos_data(input_items, pymatgen_labels=True, use_string_as_keys=False):
    """
    Arrange the PDOS weights so it is more meaningful
    The result can be used to compute PDOS for creating CompleteDos object
    that can be used for Pymatgen
    Args:
        input_items (dict): A dictionary of the pdos information, use the
          output of `read_pdos` function.
        pymatgen_labels (bool): Use pymatgen Enum as the keys of the result dictionary.
        use_string_as_keys (bool): NOTE(review): currently unused -- the key
          style is decided solely by ``pymatgen_labels``.
    Returns:
        A dictionary of {Site_index: {Orbital: {Spin: weight}}}
    """
    if pymatgen_labels is True:
        try:
            from pymatgen.electronic_structure.core import Orbital as POrbital
            from pymatgen.electronic_structure.core import Spin as PSpin
        except ImportError:
            # pymatgen not installed -- silently fall back to local enums.
            pymatgen_labels = False
    if pymatgen_labels:
        # Note that s-p labels are inferreed from dot castep output
        # f labels - I know the first three is among the first three.
        # There is no way to tell if they are correct, f_1 is not very informative from VASP....
        orbital_mapping = [[POrbital.s], [POrbital.px, POrbital.py, POrbital.pz],
                           [
                               POrbital.dz2, POrbital.dyz, POrbital.dxz, POrbital.dx2,
                               POrbital.dxy
                           ],
                           [
                               POrbital.f_1, POrbital.f_2, POrbital.f_3, POrbital.f0,
                               POrbital.f1, POrbital.f2, POrbital.f3
                           ]]
        Spin = PSpin
    else:
        # These are the orders inferred from CASTEP output
        orbital_mapping = [[OrbitalEnum.s], [OrbitalEnum.px, OrbitalEnum.py, OrbitalEnum.pz],
                           [
                               OrbitalEnum.dz2, OrbitalEnum.dyz, OrbitalEnum.dxz, OrbitalEnum.dx2,
                               OrbitalEnum.dxy
                           ],
                           [
                               OrbitalEnum.f_xxx, OrbitalEnum.f_yyy, OrbitalEnum.f_zzz, OrbitalEnum.f_xyz,
                               OrbitalEnum.f_z_xx_yy, OrbitalEnum.f_y_zz_xx, OrbitalEnum.f_x_yy_zz
                           ]]
        Spin = SpinEnum
    # We take average of each kpoints from here
    # One might task why not take account the kpoints weight?
    # because it should be taken account of in the TDOS
    weights = input_items['pdos_weights']
    # Specie index for all orbitals
    species = input_items['species']
    # Index of each ion for all orbitals
    ion = input_items['ion']
    num_spins = input_items['num_spins']
    # Angular momentum channel all orbitals
    am_channel = input_items['am_channel']
    unique_speices = np.unique(species)
    unique_speices.sort()
    site_index = 0
    output_data = {}
    # Initialise storage space
    for specie in unique_speices:
        specie_mask = specie == species
        # Total number of ions for this specie
        total_ions = ion[specie_mask].max()
        # Note that indice are from one, not zero
        for nion in range(1, total_ions + 1):
            # Iterate through each ion
            ion_mask = (ion == nion) & specie_mask
            max_am = am_channel[ion_mask].max()
            site_dict = {}  # {Orbital: {Spin: weight}...}
            for am in range(max_am + 1):
                # Collect the angular momentum channels
                ion_am_mask = (am_channel == am) & ion_mask
                # Indices of each matched channels
                ion_am_idx = np.where(ion_am_mask)[0]
                for iam, iloc in enumerate(ion_am_idx):
                    # iloc - index of the oribtal
                    # You can have 4 orbitals for p channel - they have difference n numbers
                    # Orbitals repeat every 2l+1 entries when several n shells
                    # share the same angular-momentum channel.
                    this_orb = orbital_mapping[am][iam % (2 * am + 1)]
                    orb_dict = {}  # {Spin: weight...}
                    if num_spins == 2:
                        for ispin, espin in enumerate((Spin.up, Spin.down)):
                            # Sumup
                            wtmp = weights[iloc, :, :, ispin]
                            orb_dict[espin] = wtmp
                    else:
                        orb_dict[Spin.up] = weights[iloc, :, :, 0]
                    # Now we have the orb_dict populated
                    # Combined the weights if this orbital has been seen...
                    if this_orb in site_dict:
                        site_dict[this_orb] = _merge_weights(
                            site_dict[this_orb], orb_dict)
                    else:
                        site_dict[this_orb] = orb_dict
            # Now we populated site_dict add it to output_data
            output_data[site_index] = site_dict
            site_index += 1
    return output_data
def compute_pdos(pdos_bin, eigenvalues, kpoints_weights, bins):
    """
    Compute the PDOS from eigenvalue and kpoint weights

    Args:
        pdos_bin (str): Path to the binary pdos_bin file
        eigenvalues (dict): Eigenvalues as {Spin: array}.
        kpoints_weights (np.ndarray): Weights of each kpoints.
        bins: The bins for computing the density of states.

    Returns:
        {site_index: {Orbital: {Spin: histogram-counts}}}
    """
    # Read the raw weights and group them by site/orbital/spin.
    site_weights = reorder_pdos_data(read_pdos_bin(pdos_bin))
    result = {}
    for site_idx, orbitals in site_weights.items():
        per_orbital = {}
        for orbital, spin_weights in orbitals.items():
            # Histogram the eigenvalues weighted by (kpoint weight x
            # orbital projection weight) for every spin channel.
            per_orbital[orbital] = {
                spin: np.histogram(
                    eigenvalue_set,
                    bins=bins,
                    weights=kpoints_weights * spin_weights[spin]
                )[0]
                for spin, eigenvalue_set in eigenvalues.items()
            }
        result[site_idx] = per_orbital
    return result
def _merge_weights(spin_d1, spin_d2):
"""Sum the weights stored in two dictionaries with keys being the spins"""
if len(spin_d1) != len(spin_d2):
raise RuntimeError("Critical - mismatch spin-dict length")
out = {}
for spin in spin_d1:
out[spin] = spin_d1[spin] + spin_d2[spin]
return out
| 34.555556 | 103 | 0.577804 | from enum import Enum, unique
import numpy as np
from scipy.io import FortranFile
@unique
class SpinEnum(Enum):
    """
    Enum type for Spin. Only up and down.
    Usage: Spin.up, Spin.down.
    """
    up, down = (1, -1)
    def __int__(self):
        """Spin as an int: +1 (up) or -1 (down)."""
        return self.value
    def __float__(self):
        """Spin as a float: +1.0 or -1.0."""
        return float(self.value)
    def __str__(self):
        """Spin rendered as its numeric value, e.g. '1'."""
        return str(self.value)
@unique
class OrbitalType(Enum):
    """
    Enum type for orbital type. Indices are basically the azimuthal quantum
    number, l.
    """
    s = 0
    p = 1
    d = 2
    f = 3
    def __str__(self):
        """Render as the channel letter, e.g. 's'."""
        return str(self.name)
@unique
class OrbitalEnum(Enum):
    """
    Enum type for specific orbitals. The values are the labels reported by
    CASTEP.
    """
    s = "S"
    px = "Px"
    py = "Py"
    pz = "Pz"
    dxy = "Dxy"
    dyz = "Dzy"
    dz2 = "Dzz"
    dxz = "Dzx"
    dx2 = "Dxx-yy"
    f_xxx = "Fxxx"
    f_yyy = "Fyyy"
    f_zzz = "Fzzz"
    f_xyz = "Fxyz"
    f_z_xx_yy = "Fz(xx-yy)"
    f_y_zz_xx = "Fy(zz-xx)"
    f_x_yy_zz = "Fx(yy-zz)"
    def __int__(self):
        # NOTE(review): the value is a string, so int(member) will raise
        # TypeError; kept for interface parity with SpinEnum.
        return self.value
    def __str__(self):
        """Render as the member name, e.g. 'px'."""
        return str(self.name)
    @property
    def orbital_type(self):
        """
        Returns OrbitalType of an orbital (from the first letter: s/p/d/f).
        """
        return OrbitalType[self.name[0]]
def read_pdos_bin(filename, endian='big'):
    """
    Read the pdos_bin file generated by CASTEP Spectral task.

    Args:
        filename (str): name of the file to be read
        endian (str): byte order of the records, ``'big'`` (CASTEP default)
            or ``'little'``.

    Returns:
        A dictionary of the data that have been read.
        the weights of each orbital in stored in the 'pdos_weights' array
        with dimension (n_orbital, n_max_eign, n_kpoints, n_spin)
    """
    # BUG FIX: the little-endian branch previously also produced '>' and the
    # header/record dtypes below were hard-coded big-endian, so
    # endian='little' could never work. All dtypes now derive from esymbol.
    esymbol = '>' if endian.upper() == 'BIG' else '<'
    dint = np.dtype(esymbol + 'i4')
    ddouble = np.dtype(esymbol + 'f8')
    dch80 = np.dtype(esymbol + 'a80')
    diarray = lambda x: '{}({},)i4'.format(esymbol, x)
    ddarray = lambda x: '{}({},)f8'.format(esymbol, x)

    with FortranFile(filename, header_dtype=np.dtype(esymbol + 'u4')) as fhandle:
        fversion = fhandle.read_record(ddouble)[0]
        fheader = fhandle.read_record(dch80)[0].decode()
        num_kpoints = fhandle.read_record(dint)[0]
        num_spins = fhandle.read_record(dint)[0]
        num_popn_orb = fhandle.read_record(dint)[0]
        max_eignenv = fhandle.read_record(dint)[0]
        # Per-orbital metadata: one entry per population-analysis orbital.
        species = fhandle.read_record(diarray(num_popn_orb))
        ion = fhandle.read_record(diarray(num_popn_orb))
        am_channel = fhandle.read_record(diarray(num_popn_orb))
        # Storage for the weights and k-point metadata.
        pdos_weights = np.zeros(
            (num_popn_orb, max_eignenv, num_kpoints, num_spins),
            dtype=float)
        kpoint_positions = np.zeros((num_kpoints, 3), dtype=float)
        num_eigenvalues = np.zeros(num_spins, dtype=int)
        for nk in range(num_kpoints):
            # Record layout: (k-point index, fractional coordinates).
            _, kpoint_positions[nk, :] = fhandle.read_record(
                esymbol + 'i4', esymbol + '(3,)f8')
            for ns in range(num_spins):
                _ = fhandle.read_record(dint)  # spin index record (unused)
                num_eigenvalues[ns] = fhandle.read_record(dint)
                for nb in range(num_eigenvalues[ns]):
                    pdos_weights[:, nb, nk, ns] = fhandle.read_record(
                        ddarray(num_popn_orb))
    # CLEANUP: the original dict listed 'pdos_weights' twice.
    output = {
        'fversion': fversion,
        'fheader': fheader,
        'num_kpoints': num_kpoints,
        'num_spins': num_spins,
        'num_popn_orb': num_popn_orb,
        'max_eigenenv': max_eignenv,
        'species': species,
        'ion': ion,
        'am_channel': am_channel,
        'kpoints_positions': kpoint_positions,
        'num_eigenvalues': num_eigenvalues,
        'pdos_weights': pdos_weights,
    }
    return output
def reorder_pdos_data(input_items, pymatgen_labels=True, use_string_as_keys=False):
    """
    Arrange the PDOS weights into {site_index: {Orbital: {Spin: weight}}}.

    Args:
        input_items (dict): the output of ``read_pdos_bin``.
        pymatgen_labels (bool): use pymatgen's Orbital/Spin enums as keys
            when pymatgen is importable; otherwise the local enums are used.
        use_string_as_keys (bool): NOTE(review): currently unused -- the key
            style is decided solely by ``pymatgen_labels``.

    Returns:
        A dictionary of {site_index: {Orbital: {Spin: weight}}}
    """
    if pymatgen_labels is True:
        try:
            from pymatgen.electronic_structure.core import Orbital as POrbital
            from pymatgen.electronic_structure.core import Spin as PSpin
        except ImportError:
            # pymatgen not installed -- silently fall back to local enums.
            pymatgen_labels = False
    if pymatgen_labels:
        # Orbital ordering per angular-momentum channel (s, p, d, f).
        orbital_mapping = [[POrbital.s], [POrbital.px, POrbital.py, POrbital.pz],
                           [
                               POrbital.dz2, POrbital.dyz, POrbital.dxz, POrbital.dx2,
                               POrbital.dxy
                           ],
                           [
                               POrbital.f_1, POrbital.f_2, POrbital.f_3, POrbital.f0,
                               POrbital.f1, POrbital.f2, POrbital.f3
                           ]]
        Spin = PSpin
    else:
        # Same ordering expressed with the local enums.
        orbital_mapping = [[OrbitalEnum.s], [OrbitalEnum.px, OrbitalEnum.py, OrbitalEnum.pz],
                           [
                               OrbitalEnum.dz2, OrbitalEnum.dyz, OrbitalEnum.dxz, OrbitalEnum.dx2,
                               OrbitalEnum.dxy
                           ],
                           [
                               OrbitalEnum.f_xxx, OrbitalEnum.f_yyy, OrbitalEnum.f_zzz, OrbitalEnum.f_xyz,
                               OrbitalEnum.f_z_xx_yy, OrbitalEnum.f_y_zz_xx, OrbitalEnum.f_x_yy_zz
                           ]]
        Spin = SpinEnum
    # Per-orbital arrays from the reader: weights plus specie/ion/l indices.
    weights = input_items['pdos_weights']
    species = input_items['species']
    ion = input_items['ion']
    num_spins = input_items['num_spins']
    am_channel = input_items['am_channel']
    unique_speices = np.unique(species)
    unique_speices.sort()
    site_index = 0
    output_data = {}
    for specie in unique_speices:
        specie_mask = specie == species
        # Ion indices in the file are 1-based.
        total_ions = ion[specie_mask].max()
        for nion in range(1, total_ions + 1):
            ion_mask = (ion == nion) & specie_mask
            max_am = am_channel[ion_mask].max()
            site_dict = {}  # {Orbital: {Spin: weight}}
            for am in range(max_am + 1):
                # Orbitals of this ion belonging to channel l == am.
                ion_am_mask = (am_channel == am) & ion_mask
                ion_am_idx = np.where(ion_am_mask)[0]
                for iam, iloc in enumerate(ion_am_idx):
                    # Orbitals repeat every 2l+1 entries when several n
                    # shells share the same angular-momentum channel.
                    this_orb = orbital_mapping[am][iam % (2 * am + 1)]
                    orb_dict = {}  # {Spin: weight}
                    if num_spins == 2:
                        for ispin, espin in enumerate((Spin.up, Spin.down)):
                            wtmp = weights[iloc, :, :, ispin]
                            orb_dict[espin] = wtmp
                    else:
                        orb_dict[Spin.up] = weights[iloc, :, :, 0]
                    # Accumulate when the same orbital was already seen.
                    if this_orb in site_dict:
                        site_dict[this_orb] = _merge_weights(
                            site_dict[this_orb], orb_dict)
                    else:
                        site_dict[this_orb] = orb_dict
            output_data[site_index] = site_dict
            site_index += 1
    return output_data
def compute_pdos(pdos_bin, eigenvalues, kpoints_weights, bins):
    """
    Compute the PDOS from eigenvalue and kpoint weights.

    Args:
        pdos_bin (str): Path to the binary pdos_bin file
        eigenvalues (dict): Eigenvalues as {Spin: array}.
        kpoints_weights (np.ndarray): Weights of each kpoints.
        bins: The bins for computing the density of states.

    Returns:
        {site_index: {Orbital: {Spin: histogram-counts}}}
    """
    ordered_weights = reorder_pdos_data(read_pdos_bin(pdos_bin))
    pdos_data = {}
    for site, porbs_dict in ordered_weights.items():
        porbs_outdict = {}
        for orb, pspin_dict in porbs_dict.items():
            # Histogram the eigenvalues weighted by kpoint weight times the
            # per-orbital projection weight, for every spin channel.
            pdos_orbit = {
                spin: np.histogram(
                    eigenvalue_set,
                    bins=bins,
                    weights=kpoints_weights * pspin_dict[
                        spin]  # weight (nk,); pspin_dict[spin] (nk, nb)
                )[0]
                for spin, eigenvalue_set in eigenvalues.items()
            }
            porbs_outdict[orb] = pdos_orbit
        pdos_data[site] = porbs_outdict
    return pdos_data
def _merge_weights(spin_d1, spin_d2):
if len(spin_d1) != len(spin_d2):
raise RuntimeError("Critical - mismatch spin-dict length")
out = {}
for spin in spin_d1:
out[spin] = spin_d1[spin] + spin_d2[spin]
return out
| true | true |
f73e497f0d130c2f6beb046e40354c51ea162268 | 3,003 | py | Python | hlwtadmin/management/commands/delete_datakunstenbe_podiumfestivalinfo.py | Kunstenpunt/havelovewilltravel | 6a27824b4d3d8b1bf19e0bc0d0648f0f4e8abc83 | [
"Apache-2.0"
] | 1 | 2020-10-16T16:29:01.000Z | 2020-10-16T16:29:01.000Z | hlwtadmin/management/commands/delete_datakunstenbe_podiumfestivalinfo.py | Kunstenpunt/havelovewilltravel | 6a27824b4d3d8b1bf19e0bc0d0648f0f4e8abc83 | [
"Apache-2.0"
] | 365 | 2020-02-03T12:46:53.000Z | 2022-02-27T17:20:46.000Z | hlwtadmin/management/commands/delete_datakunstenbe_podiumfestivalinfo.py | Kunstenpunt/havelovewilltravel | 6a27824b4d3d8b1bf19e0bc0d0648f0f4e8abc83 | [
"Apache-2.0"
] | null | null | null | from hlwtadmin.models import Artist, GigFinderUrl, GigFinder, ConcertAnnouncement, Venue, Location, Organisation, Country, Concert, RelationConcertConcert, RelationConcertOrganisation, RelationConcertArtist, Location
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
    """Delete every concert announced by the 'datakunstenbe' and
    'podiumfestivalinfo' gig finders, together with the concert's
    announcements and artist/organisation/concert relations."""

    # Gig finders whose concerts are purged.
    GIGFINDER_NAMES = ("datakunstenbe", "podiumfestivalinfo")

    def add_arguments(self, parser):
        # No command-line options.
        pass

    def handle(self, *args, **options):
        # DEDUPLICATION: the original repeated the same ~25-line purge loop
        # verbatim for each gig finder; both now share one helper.
        for gigfinder_name in self.GIGFINDER_NAMES:
            self._purge_concerts_from(gigfinder_name)

    def _purge_concerts_from(self, gigfinder_name):
        """Delete all concerts originating from *gigfinder_name*."""
        for concert in Concert.objects.filter(
                concertannouncement__gigfinder__name=gigfinder_name):
            print(concert, concert.pk)
            for ca in ConcertAnnouncement.objects.filter(concert=concert):
                ca.concert = None
                if ca.gigfinder.name == gigfinder_name:
                    # Announcements from the purged finder are removed.
                    ca.artist = None
                    ca.gigfinder = None
                    ca.raw_venue = None
                    ca.delete()
                else:
                    # Announcements from other finders are only detached.
                    ca.save(update_fields=["concert"])
            for rel in RelationConcertArtist.objects.filter(concert=concert):
                rel.artist = None
                rel.concert = None
                rel.delete()
            for rel in RelationConcertOrganisation.objects.filter(concert=concert):
                rel.organisation = None
                rel.concert = None
                rel.delete()
            for rel in RelationConcertConcert.objects.filter(concert_a=concert):
                rel.concert_a = None
                rel.concert_b = None
                rel.delete()
            concert.delete()
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
for concert in Concert.objects.filter(concertannouncement__gigfinder__name="datakunstenbe"):
print(concert, concert.pk)
for ca in ConcertAnnouncement.objects.filter(concert=concert):
ca.concert=None
if ca.gigfinder.name == "datakunstenbe":
ca.artist = None
ca.gigfinder = None
ca.raw_venue = None
ca.delete()
else:
ca.save(update_fields=["concert"])
for relconcertartist in RelationConcertArtist.objects.filter(concert=concert):
relconcertartist.artist=None
relconcertartist.concert=None
relconcertartist.delete()
for relconcertorganisation in RelationConcertOrganisation.objects.filter(concert=concert):
relconcertorganisation.organisation = None
relconcertorganisation.concert = None
relconcertorganisation.delete()
for relconcertconcert in RelationConcertConcert.objects.filter(concert_a=concert):
relconcertconcert.concert_a = None
relconcertconcert.concert_b = None
relconcertconcert.delete()
concert.delete()
for concert in Concert.objects.filter(concertannouncement__gigfinder__name="podiumfestivalinfo"):
print(concert, concert.pk)
for ca in ConcertAnnouncement.objects.filter(concert=concert):
ca.concert=None
if ca.gigfinder.name == "podiumfestivalinfo":
ca.artist = None
ca.gigfinder = None
ca.raw_venue = None
ca.delete()
else:
ca.save(update_fields=["concert"])
for relconcertartist in RelationConcertArtist.objects.filter(concert=concert):
relconcertartist.artist = None
relconcertartist.concert = None
relconcertartist.delete()
for relconcertorganisation in RelationConcertOrganisation.objects.filter(concert=concert):
relconcertorganisation.organisation = None
relconcertorganisation.concert = None
relconcertorganisation.delete()
for relconcertconcert in RelationConcertConcert.objects.filter(concert_a=concert):
relconcertconcert.concert_a = None
relconcertconcert.concert_b = None
relconcertconcert.delete()
concert.delete()
| true | true |
f73e49ba09103087760a7f42b0899eb546f8456c | 1,705 | py | Python | jsonify.py | SalarHoushvand/discrete-math-restfulAPI | 213971d5e26eb7d2848c707e591842b24494772a | [
"MIT"
] | 3 | 2020-10-02T13:42:16.000Z | 2020-10-28T00:14:43.000Z | jsonify.py | SalarHoushvand/discrete-math-restfulAPI | 213971d5e26eb7d2848c707e591842b24494772a | [
"MIT"
] | 1 | 2020-09-25T15:48:43.000Z | 2020-09-25T15:48:43.000Z | jsonify.py | SalarHoushvand/discrete-math-restfulAPI | 213971d5e26eb7d2848c707e591842b24494772a | [
"MIT"
] | 1 | 2020-10-16T14:18:03.000Z | 2020-10-16T14:18:03.000Z | # -----------------------------------------------------------
# Functions to generate JSON files in a desired format
# from the functions inside functions.py
# -----------------------------------------------------------
def question_json_maker(question_id, question, answer, answer_index=1, question_type='MC', difficulty=1, points=10):
"""
Generates JSON file for each question.
:param question: input question.(str)
:param answer: output answer(str or int)
:param answer_index: index of the correct answer(int)
:param question_type: type of question ex: MC (Multiple Choice).(str)
:param difficulty: difficulty level of the question(1 to 5)(int)
:param points : points for each right answer. (int)
:return: a question.(JSON)
"""
questions_template = {
"questionID": question_id,
"question": question,
"questionType": question_type,
"answerSelectionType": "single",
"answers": answer,
"correctAnswer": answer_index,
"messageForCorrectAnswer": "Correct Answer",
"messageForIncorrectAnswer": "Incorrect Answer",
"difficulty": difficulty,
"point": points
}
return questions_template
def json_maker(topic='', questions_list=[]):
"""
Gets list of questions, topic and generates final version of JSON.
:type questions_list: list
:param topic: topic which question covers.(str)
:param questions_list: list containing all the questions.(list)
:return: final version of the question.(JSON)
"""
question = {
"quizTitle": topic,
"quizSynopsis": f"Test Quiz for {topic}",
"questions": questions_list}
return question
| 34.795918 | 116 | 0.621701 |
def question_json_maker(question_id, question, answer, answer_index=1, question_type='MC', difficulty=1, points=10):
questions_template = {
"questionID": question_id,
"question": question,
"questionType": question_type,
"answerSelectionType": "single",
"answers": answer,
"correctAnswer": answer_index,
"messageForCorrectAnswer": "Correct Answer",
"messageForIncorrectAnswer": "Incorrect Answer",
"difficulty": difficulty,
"point": points
}
return questions_template
def json_maker(topic='', questions_list=[]):
question = {
"quizTitle": topic,
"quizSynopsis": f"Test Quiz for {topic}",
"questions": questions_list}
return question
| true | true |
f73e4b51f4d05479384d0659ba79ece4deff2953 | 32,440 | py | Python | fhir/resources/messagedefinition.py | cstoltze/fhir.resources | 52f99738935b7313089d89daf94d73ce7d167c9d | [
"BSD-3-Clause"
] | 144 | 2019-05-08T14:24:43.000Z | 2022-03-30T02:37:11.000Z | fhir/resources/messagedefinition.py | cstoltze/fhir.resources | 52f99738935b7313089d89daf94d73ce7d167c9d | [
"BSD-3-Clause"
] | 82 | 2019-05-13T17:43:13.000Z | 2022-03-30T16:45:17.000Z | fhir/resources/messagedefinition.py | cstoltze/fhir.resources | 52f99738935b7313089d89daf94d73ce7d167c9d | [
"BSD-3-Clause"
] | 48 | 2019-04-04T14:14:53.000Z | 2022-03-30T06:07:31.000Z | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/MessageDefinition
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import typing
from pydantic import Field, root_validator
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.errors import MissingError, NoneIsNotAllowedError
from . import backboneelement, domainresource, fhirtypes
class MessageDefinition(domainresource.DomainResource):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
A resource that defines a type of message that can be exchanged between
systems.
Defines the characteristics of a message that can be shared between
systems, including the type of event that initiates the message, the
content to be transmitted and what response(s), if any, are permitted.
"""
resource_type = Field("MessageDefinition", const=True)
allowedResponse: typing.List[
fhirtypes.MessageDefinitionAllowedResponseType
] = Field(
None,
alias="allowedResponse",
title="Responses to this message",
description=(
"Indicates what types of messages may be sent as an application-level "
"response to this message."
),
# if property is element of this resource.
element_property=True,
)
base: fhirtypes.Canonical = Field(
None,
alias="base",
title="Definition this one is based on",
description=(
"The MessageDefinition that is the basis for the contents of this "
"resource."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["MessageDefinition"],
)
base__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_base", title="Extension field for ``base``."
)
category: fhirtypes.Code = Field(
None,
alias="category",
title="consequence | currency | notification",
description="The impact of the content of the message.",
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["consequence", "currency", "notification"],
)
category__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_category", title="Extension field for ``category``."
)
contact: typing.List[fhirtypes.ContactDetailType] = Field(
None,
alias="contact",
title="Contact details for the publisher",
description=(
"Contact details to assist a user in finding and communicating with the"
" publisher."
),
# if property is element of this resource.
element_property=True,
)
copyright: fhirtypes.Markdown = Field(
None,
alias="copyright",
title="Use and/or publishing restrictions",
description=(
"A copyright statement relating to the message definition and/or its "
"contents. Copyright statements are generally legal restrictions on the"
" use and publishing of the message definition."
),
# if property is element of this resource.
element_property=True,
)
copyright__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_copyright", title="Extension field for ``copyright``."
)
date: fhirtypes.DateTime = Field(
None,
alias="date",
title="Date last changed",
description=(
"The date (and optionally time) when the message definition was "
"published. The date must change when the business version changes and "
"it must change if the status code changes. In addition, it should "
"change when the substantive content of the message definition changes."
),
# if property is element of this resource.
element_property=True,
element_required=True,
)
date__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_date", title="Extension field for ``date``."
)
description: fhirtypes.Markdown = Field(
None,
alias="description",
title="Natural language description of the message definition",
description=(
"A free text natural language description of the message definition "
"from a consumer's perspective."
),
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
eventCoding: fhirtypes.CodingType = Field(
None,
alias="eventCoding",
title="Event code or link to the EventDefinition",
description="Event code or link to the EventDefinition.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e event[x]
one_of_many="event",
one_of_many_required=True,
)
eventUri: fhirtypes.Uri = Field(
None,
alias="eventUri",
title="Event code or link to the EventDefinition",
description="Event code or link to the EventDefinition.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e event[x]
one_of_many="event",
one_of_many_required=True,
)
eventUri__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_eventUri", title="Extension field for ``eventUri``."
)
experimental: bool = Field(
None,
alias="experimental",
title="For testing purposes, not real usage",
description=(
"A Boolean value to indicate that this message definition is authored "
"for testing purposes (or education/evaluation/marketing) and is not "
"intended to be used for genuine usage."
),
# if property is element of this resource.
element_property=True,
)
experimental__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_experimental", title="Extension field for ``experimental``."
)
focus: typing.List[fhirtypes.MessageDefinitionFocusType] = Field(
None,
alias="focus",
title="Resource(s) that are the subject of the event",
description=(
"Identifies the resource (or resources) that are being addressed by the"
" event. For example, the Encounter for an admit message or two "
"Account records for a merge."
),
# if property is element of this resource.
element_property=True,
)
graph: typing.List[fhirtypes.Canonical] = Field(
None,
alias="graph",
title="Canonical reference to a GraphDefinition",
description=(
"Canonical reference to a GraphDefinition. If a URL is provided, it is "
"the canonical reference to a [GraphDefinition](graphdefinition.html) "
"that it controls what resources are to be added to the bundle when "
"building the document. The GraphDefinition can also specify profiles "
"that apply to the various resources."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["GraphDefinition"],
)
graph__ext: typing.List[
typing.Union[fhirtypes.FHIRPrimitiveExtensionType, None]
] = Field(None, alias="_graph", title="Extension field for ``graph``.")
identifier: typing.List[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="Primary key for the message definition on a given server",
description=(
"A formal identifier that is used to identify this message definition "
"when it is represented in other formats, or referenced in a "
"specification, model, design or an instance."
),
# if property is element of this resource.
element_property=True,
)
jurisdiction: typing.List[fhirtypes.CodeableConceptType] = Field(
None,
alias="jurisdiction",
title="Intended jurisdiction for message definition (if applicable)",
description=(
"A legal or geographic region in which the message definition is "
"intended to be used."
),
# if property is element of this resource.
element_property=True,
)
name: fhirtypes.String = Field(
None,
alias="name",
title="Name for this message definition (computer friendly)",
description=(
"A natural language name identifying the message definition. This name "
"should be usable as an identifier for the module by machine processing"
" applications such as code generation."
),
# if property is element of this resource.
element_property=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
parent: typing.List[fhirtypes.Canonical] = Field(
None,
alias="parent",
title="Protocol/workflow this is part of",
description=(
"Identifies a protocol or workflow that this MessageDefinition "
"represents a step in."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["ActivityDefinition", "PlanDefinition"],
)
parent__ext: typing.List[
typing.Union[fhirtypes.FHIRPrimitiveExtensionType, None]
] = Field(None, alias="_parent", title="Extension field for ``parent``.")
publisher: fhirtypes.String = Field(
None,
alias="publisher",
title="Name of the publisher (organization or individual)",
description=(
"The name of the organization or individual that published the message "
"definition."
),
# if property is element of this resource.
element_property=True,
)
publisher__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_publisher", title="Extension field for ``publisher``."
)
purpose: fhirtypes.Markdown = Field(
None,
alias="purpose",
title="Why this message definition is defined",
description=(
"Explanation of why this message definition is needed and why it has "
"been designed as it has."
),
# if property is element of this resource.
element_property=True,
)
purpose__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_purpose", title="Extension field for ``purpose``."
)
replaces: typing.List[fhirtypes.Canonical] = Field(
None,
alias="replaces",
title="Takes the place of",
description="A MessageDefinition that is superseded by this definition.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["MessageDefinition"],
)
replaces__ext: typing.List[
typing.Union[fhirtypes.FHIRPrimitiveExtensionType, None]
] = Field(None, alias="_replaces", title="Extension field for ``replaces``.")
responseRequired: fhirtypes.Code = Field(
None,
alias="responseRequired",
title="always | on-error | never | on-success",
description=(
"Declare at a message definition level whether a response is required "
"or only upon error or success, or never."
),
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["always", "on-error", "never", "on-success"],
)
responseRequired__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_responseRequired",
title="Extension field for ``responseRequired``.",
)
status: fhirtypes.Code = Field(
None,
alias="status",
title="draft | active | retired | unknown",
description=(
"The status of this message definition. Enables tracking the life-cycle"
" of the content."
),
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["draft", "active", "retired", "unknown"],
)
status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_status", title="Extension field for ``status``."
)
title: fhirtypes.String = Field(
None,
alias="title",
title="Name for this message definition (human friendly)",
description="A short, descriptive, user-friendly title for the message definition.",
# if property is element of this resource.
element_property=True,
)
title__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_title", title="Extension field for ``title``."
)
url: fhirtypes.Uri = Field(
None,
alias="url",
title="Business Identifier for a given MessageDefinition",
description=(
"The business identifier that is used to reference the "
"MessageDefinition and *is* expected to be consistent from server to "
"server."
),
# if property is element of this resource.
element_property=True,
)
url__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_url", title="Extension field for ``url``."
)
useContext: typing.List[fhirtypes.UsageContextType] = Field(
None,
alias="useContext",
title="The context that the content is intended to support",
description=(
"The content was developed with a focus and intent of supporting the "
"contexts that are listed. These contexts may be general categories "
"(gender, age, ...) or may be references to specific programs "
"(insurance plans, studies, ...) and may be used to assist with "
"indexing and searching for appropriate message definition instances."
),
# if property is element of this resource.
element_property=True,
)
version: fhirtypes.String = Field(
None,
alias="version",
title="Business version of the message definition",
description=(
"The identifier that is used to identify this version of the message "
"definition when it is referenced in a specification, model, design or "
"instance. This is an arbitrary value managed by the message definition"
" author and is not expected to be globally unique. For example, it "
"might be a timestamp (e.g. yyyymmdd) if a managed version is not "
"available. There is also no expectation that versions can be placed in"
" a lexicographical sequence."
),
# if property is element of this resource.
element_property=True,
)
version__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_version", title="Extension field for ``version``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``MessageDefinition`` according specification,
with preserving original sequence order.
"""
return [
"id",
"meta",
"implicitRules",
"language",
"text",
"contained",
"extension",
"modifierExtension",
"url",
"identifier",
"version",
"name",
"title",
"replaces",
"status",
"experimental",
"date",
"publisher",
"contact",
"description",
"useContext",
"jurisdiction",
"purpose",
"copyright",
"base",
"parent",
"eventCoding",
"eventUri",
"category",
"focus",
"responseRequired",
"allowedResponse",
"graph",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_1929(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("date", "date__ext"), ("status", "status__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
@root_validator(pre=True, allow_reuse=True)
def validate_one_of_many_1929(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {"event": ["eventCoding", "eventUri"]}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
class MessageDefinitionAllowedResponse(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Responses to this message.
Indicates what types of messages may be sent as an application-level
response to this message.
"""
resource_type = Field("MessageDefinitionAllowedResponse", const=True)
message: fhirtypes.Canonical = Field(
None,
alias="message",
title="Reference to allowed message definition response",
description=(
"A reference to the message definition that must be adhered to by this "
"supported response."
),
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["MessageDefinition"],
)
message__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_message", title="Extension field for ``message``."
)
situation: fhirtypes.Markdown = Field(
None,
alias="situation",
title="When should this response be used",
description=(
"Provides a description of the circumstances in which this response "
"should be used (as opposed to one of the alternative responses)."
),
# if property is element of this resource.
element_property=True,
)
situation__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_situation", title="Extension field for ``situation``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``MessageDefinitionAllowedResponse`` according specification,
with preserving original sequence order.
"""
return ["id", "extension", "modifierExtension", "message", "situation"]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_3479(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("message", "message__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class MessageDefinitionFocus(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Resource(s) that are the subject of the event.
Identifies the resource (or resources) that are being addressed by the
event. For example, the Encounter for an admit message or two Account
records for a merge.
"""
resource_type = Field("MessageDefinitionFocus", const=True)
code: fhirtypes.Code = Field(
None,
alias="code",
title="Type of resource",
description="The kind of resource that must be the focus for this message.",
# if property is element of this resource.
element_property=True,
element_required=True,
)
code__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_code", title="Extension field for ``code``."
)
max: fhirtypes.String = Field(
None,
alias="max",
title="Maximum number of focuses of this type",
description=(
"Identifies the maximum number of resources of this type that must be "
"pointed to by a message in order for it to be valid against this "
"MessageDefinition."
),
# if property is element of this resource.
element_property=True,
)
max__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_max", title="Extension field for ``max``."
)
min: fhirtypes.UnsignedInt = Field(
None,
alias="min",
title="Minimum number of focuses of this type",
description=(
"Identifies the minimum number of resources of this type that must be "
"pointed to by a message in order for it to be valid against this "
"MessageDefinition."
),
# if property is element of this resource.
element_property=True,
element_required=True,
)
min__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_min", title="Extension field for ``min``."
)
profile: fhirtypes.Canonical = Field(
None,
alias="profile",
title="Profile that must be adhered to by focus",
description=(
"A profile that reflects constraints for the focal resource (and "
"potentially for related resources)."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["StructureDefinition"],
)
profile__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_profile", title="Extension field for ``profile``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``MessageDefinitionFocus`` according specification,
with preserving original sequence order.
"""
return ["id", "extension", "modifierExtension", "code", "profile", "min", "max"]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_2446(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("code", "code__ext"), ("min", "min__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
| 39.85258 | 93 | 0.60561 |
import typing
from pydantic import Field, root_validator
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.errors import MissingError, NoneIsNotAllowedError
from . import backboneelement, domainresource, fhirtypes
class MessageDefinition(domainresource.DomainResource):
    """Pydantic model for the FHIR ``MessageDefinition`` resource.

    Each field's ``title``/``description`` metadata mirrors the FHIR
    specification text for the corresponding element.  Every primitive field
    ``x`` has a companion ``x__ext`` field (serialized as ``_x``) carrying
    primitive-extension data, per the FHIR JSON representation rules.
    """

    # Fixed discriminator for this resource type; ``const=True`` forbids overriding.
    resource_type = Field("MessageDefinition", const=True)

    allowedResponse: typing.List[
        fhirtypes.MessageDefinitionAllowedResponseType
    ] = Field(
        None,
        alias="allowedResponse",
        title="Responses to this message",
        description=(
            "Indicates what types of messages may be sent as an application-level "
            "response to this message."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    base: fhirtypes.Canonical = Field(
        None,
        alias="base",
        title="Definition this one is based on",
        description=(
            "The MessageDefinition that is the basis for the contents of this "
            "resource."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["MessageDefinition"],
    )
    base__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_base", title="Extension field for ``base``."
    )
    category: fhirtypes.Code = Field(
        None,
        alias="category",
        title="consequence | currency | notification",
        description="The impact of the content of the message.",
        # if property is element of this resource.
        element_property=True,
        # note: Enum values can be used in validation,
        # but use in your own responsibilities, read official FHIR documentation.
        enum_values=["consequence", "currency", "notification"],
    )
    category__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_category", title="Extension field for ``category``."
    )
    contact: typing.List[fhirtypes.ContactDetailType] = Field(
        None,
        alias="contact",
        title="Contact details for the publisher",
        description=(
            "Contact details to assist a user in finding and communicating with the"
            " publisher."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    copyright: fhirtypes.Markdown = Field(
        None,
        alias="copyright",
        title="Use and/or publishing restrictions",
        description=(
            "A copyright statement relating to the message definition and/or its "
            "contents. Copyright statements are generally legal restrictions on the"
            " use and publishing of the message definition."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    copyright__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_copyright", title="Extension field for ``copyright``."
    )
    date: fhirtypes.DateTime = Field(
        None,
        alias="date",
        title="Date last changed",
        description=(
            "The date (and optionally time) when the message definition was "
            "published. The date must change when the business version changes and "
            "it must change if the status code changes. In addition, it should "
            "change when the substantive content of the message definition changes."
        ),
        # if property is element of this resource.
        element_property=True,
        # required by the FHIR spec; enforced (with primitive-extension escape
        # hatch) by ``validate_required_primitive_elements_1929`` below.
        element_required=True,
    )
    date__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_date", title="Extension field for ``date``."
    )
    description: fhirtypes.Markdown = Field(
        None,
        alias="description",
        title="Natural language description of the message definition",
        description=(
            "A free text natural language description of the message definition "
            "from a consumer's perspective."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_description", title="Extension field for ``description``."
    )
    eventCoding: fhirtypes.CodingType = Field(
        None,
        alias="eventCoding",
        title="Event code or link to the EventDefinition",
        description="Event code or link to the EventDefinition.",
        # if property is element of this resource.
        element_property=True,
        # Choice of Data Types. i.e event[x]
        one_of_many="event",
        one_of_many_required=True,
    )
    eventUri: fhirtypes.Uri = Field(
        None,
        alias="eventUri",
        title="Event code or link to the EventDefinition",
        description="Event code or link to the EventDefinition.",
        # if property is element of this resource.
        element_property=True,
        # Choice of Data Types. i.e event[x]
        one_of_many="event",
        one_of_many_required=True,
    )
    eventUri__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_eventUri", title="Extension field for ``eventUri``."
    )
    experimental: bool = Field(
        None,
        alias="experimental",
        title="For testing purposes, not real usage",
        description=(
            "A Boolean value to indicate that this message definition is authored "
            "for testing purposes (or education/evaluation/marketing) and is not "
            "intended to be used for genuine usage."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    experimental__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_experimental", title="Extension field for ``experimental``."
    )
    focus: typing.List[fhirtypes.MessageDefinitionFocusType] = Field(
        None,
        alias="focus",
        title="Resource(s) that are the subject of the event",
        description=(
            "Identifies the resource (or resources) that are being addressed by the"
            " event.  For example, the Encounter for an admit message or two "
            "Account records for a merge."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    graph: typing.List[fhirtypes.Canonical] = Field(
        None,
        alias="graph",
        title="Canonical reference to a GraphDefinition",
        description=(
            "Canonical reference to a GraphDefinition. If a URL is provided, it is "
            "the canonical reference to a [GraphDefinition](graphdefinition.html) "
            "that it controls what resources are to be added to the bundle when "
            "building the document. The GraphDefinition can also specify profiles "
            "that apply to the various resources."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["GraphDefinition"],
    )
    graph__ext: typing.List[
        typing.Union[fhirtypes.FHIRPrimitiveExtensionType, None]
    ] = Field(None, alias="_graph", title="Extension field for ``graph``.")
    identifier: typing.List[fhirtypes.IdentifierType] = Field(
        None,
        alias="identifier",
        title="Primary key for the message definition on a given server",
        description=(
            "A formal identifier that is used to identify this message definition "
            "when it is represented in other formats, or referenced in a "
            "specification, model, design or an instance."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    jurisdiction: typing.List[fhirtypes.CodeableConceptType] = Field(
        None,
        alias="jurisdiction",
        title="Intended jurisdiction for message definition (if applicable)",
        description=(
            "A legal or geographic region in which the message definition is "
            "intended to be used."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    name: fhirtypes.String = Field(
        None,
        alias="name",
        title="Name for this message definition (computer friendly)",
        description=(
            "A natural language name identifying the message definition. This name "
            "should be usable as an identifier for the module by machine processing"
            " applications such as code generation."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_name", title="Extension field for ``name``."
    )
    parent: typing.List[fhirtypes.Canonical] = Field(
        None,
        alias="parent",
        title="Protocol/workflow this is part of",
        description=(
            "Identifies a protocol or workflow that this MessageDefinition "
            "represents a step in."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["ActivityDefinition", "PlanDefinition"],
    )
    parent__ext: typing.List[
        typing.Union[fhirtypes.FHIRPrimitiveExtensionType, None]
    ] = Field(None, alias="_parent", title="Extension field for ``parent``.")
    publisher: fhirtypes.String = Field(
        None,
        alias="publisher",
        title="Name of the publisher (organization or individual)",
        description=(
            "The name of the organization or individual that published the message "
            "definition."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    publisher__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_publisher", title="Extension field for ``publisher``."
    )
    purpose: fhirtypes.Markdown = Field(
        None,
        alias="purpose",
        title="Why this message definition is defined",
        description=(
            "Explanation of why this message definition is needed and why it has "
            "been designed as it has."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    purpose__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_purpose", title="Extension field for ``purpose``."
    )
    replaces: typing.List[fhirtypes.Canonical] = Field(
        None,
        alias="replaces",
        title="Takes the place of",
        description="A MessageDefinition that is superseded by this definition.",
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["MessageDefinition"],
    )
    replaces__ext: typing.List[
        typing.Union[fhirtypes.FHIRPrimitiveExtensionType, None]
    ] = Field(None, alias="_replaces", title="Extension field for ``replaces``.")
    responseRequired: fhirtypes.Code = Field(
        None,
        alias="responseRequired",
        title="always | on-error | never | on-success",
        description=(
            "Declare at a message definition level whether a response is required "
            "or only upon error or success, or never."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Enum values can be used in validation,
        # but use in your own responsibilities, read official FHIR documentation.
        enum_values=["always", "on-error", "never", "on-success"],
    )
    responseRequired__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None,
        alias="_responseRequired",
        title="Extension field for ``responseRequired``.",
    )
    status: fhirtypes.Code = Field(
        None,
        alias="status",
        title="draft | active | retired | unknown",
        description=(
            "The status of this message definition. Enables tracking the life-cycle"
            " of the content."
        ),
        # if property is element of this resource.
        element_property=True,
        # required by the FHIR spec; enforced by the validator below.
        element_required=True,
        # note: Enum values can be used in validation,
        # but use in your own responsibilities, read official FHIR documentation.
        enum_values=["draft", "active", "retired", "unknown"],
    )
    status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_status", title="Extension field for ``status``."
    )
    title: fhirtypes.String = Field(
        None,
        alias="title",
        title="Name for this message definition (human friendly)",
        description="A short, descriptive, user-friendly title for the message definition.",
        # if property is element of this resource.
        element_property=True,
    )
    title__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_title", title="Extension field for ``title``."
    )
    url: fhirtypes.Uri = Field(
        None,
        alias="url",
        title="Business Identifier for a given MessageDefinition",
        description=(
            "The business identifier that is used to reference the "
            "MessageDefinition and *is* expected to be consistent from server to "
            "server."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    url__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_url", title="Extension field for ``url``."
    )
    useContext: typing.List[fhirtypes.UsageContextType] = Field(
        None,
        alias="useContext",
        title="The context that the content is intended to support",
        description=(
            "The content was developed with a focus and intent of supporting the "
            "contexts that are listed. These contexts may be general categories "
            "(gender, age, ...) or may be references to specific programs "
            "(insurance plans, studies, ...) and may be used to assist with "
            "indexing and searching for appropriate message definition instances."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    version: fhirtypes.String = Field(
        None,
        alias="version",
        title="Business version of the message definition",
        description=(
            "The identifier that is used to identify this version of the message "
            "definition when it is referenced in a specification, model, design or "
            "instance. This is an arbitrary value managed by the message definition"
            " author and is not expected to be globally unique. For example, it "
            "might be a timestamp (e.g. yyyymmdd) if a managed version is not "
            "available. There is also no expectation that versions can be placed in"
            " a lexicographical sequence."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    version__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_version", title="Extension field for ``version``."
    )

    @classmethod
    def elements_sequence(cls):
        """Return all element names of this resource, preserving the original
        FHIR specification sequence order.
        """
        return [
            "id",
            "meta",
            "implicitRules",
            "language",
            "text",
            "contained",
            "extension",
            "modifierExtension",
            "url",
            "identifier",
            "version",
            "name",
            "title",
            "replaces",
            "status",
            "experimental",
            "date",
            "publisher",
            "contact",
            "description",
            "useContext",
            "jurisdiction",
            "purpose",
            "copyright",
            "base",
            "parent",
            "eventCoding",
            "eventUri",
            "category",
            "focus",
            "responseRequired",
            "allowedResponse",
            "graph",
        ]

    @root_validator(pre=True, allow_reuse=True)
    def validate_required_primitive_elements_1929(
        cls, values: typing.Dict[str, typing.Any]
    ) -> typing.Dict[str, typing.Any]:
        """https://www.hl7.org/fhir/extensibility.html#Special-Case
        In some cases, implementers might find that they do not have appropriate data for
        an element with minimum cardinality = 1. In this case, the element must be present,
        but unless the resource or a profile on it has made the actual value of the primitive
        data type mandatory, it is possible to provide an extension that explains why
        the primitive value is not present.
        """
        required_fields = [("date", "date__ext"), ("status", "status__ext")]
        _missing = object()
        def _fallback():
            return ""
        errors: typing.List["ErrorWrapper"] = []
        for name, ext in required_fields:
            field = cls.__fields__[name]
            ext_field = cls.__fields__[ext]
            value = values.get(field.alias, _missing)
            if value not in (_missing, None):
                # primitive value supplied: nothing further to check
                continue
            ext_value = values.get(ext_field.alias, _missing)
            missing_ext = True
            if ext_value not in (_missing, None):
                if isinstance(ext_value, dict):
                    # raw dict form: a non-empty "extension" list excuses the missing value
                    missing_ext = len(ext_value.get("extension", [])) == 0
                elif (
                    getattr(ext_value.__class__, "get_resource_type", _fallback)()
                    == "FHIRPrimitiveExtension"
                ):
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
                else:
                    # unknown type: coerce through the extension field's own validators
                    validate_pass = True
                    for validator in ext_field.type_.__get_validators__():
                        try:
                            ext_value = validator(v=ext_value)
                        except ValidationError as exc:
                            errors.append(ErrorWrapper(exc, loc=ext_field.alias))
                            validate_pass = False
                    if not validate_pass:
                        continue
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
            if missing_ext:
                if value is _missing:
                    errors.append(ErrorWrapper(MissingError(), loc=field.alias))
                else:
                    errors.append(
                        ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
                    )
        if len(errors) > 0:
            raise ValidationError(errors, cls)  # type: ignore
        return values

    @root_validator(pre=True, allow_reuse=True)
    def validate_one_of_many_1929(
        cls, values: typing.Dict[str, typing.Any]
    ) -> typing.Dict[str, typing.Any]:
        """Enforce the FHIR choice-type rule for ``event[x]``: exactly one of
        ``eventCoding``/``eventUri`` must be provided (the choice is required),
        and never more than one.
        """
        one_of_many_fields = {"event": ["eventCoding", "eventUri"]}
        for prefix, fields in one_of_many_fields.items():
            assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
            required = (
                cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
                is True
            )
            found = False
            for field in fields:
                if field in values and values[field] is not None:
                    if found is True:
                        raise ValueError(
                            "Any of one field value is expected from "
                            f"this list {fields}, but got multiple!"
                        )
                    else:
                        found = True
            if required is True and found is False:
                raise ValueError(f"Expect any of field value from this list {fields}.")
        return values
class MessageDefinitionAllowedResponse(backboneelement.BackboneElement):
    """Backbone element of ``MessageDefinition``: one allowed response message.

    Pairs a canonical reference to the response ``MessageDefinition`` with a
    markdown ``situation`` describing when that response applies.
    """

    # Fixed discriminator for this element type; ``const=True`` forbids overriding.
    resource_type = Field("MessageDefinitionAllowedResponse", const=True)

    message: fhirtypes.Canonical = Field(
        None,
        alias="message",
        title="Reference to allowed message definition response",
        description=(
            "A reference to the message definition that must be adhered to by this "
            "supported response."
        ),
        # if property is element of this resource.
        element_property=True,
        # required by the FHIR spec; enforced by the validator below.
        element_required=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["MessageDefinition"],
    )
    message__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_message", title="Extension field for ``message``."
    )
    situation: fhirtypes.Markdown = Field(
        None,
        alias="situation",
        title="When should this response be used",
        description=(
            "Provides a description of the circumstances in which this response "
            "should be used (as opposed to one of the alternative responses)."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    situation__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_situation", title="Extension field for ``situation``."
    )

    @classmethod
    def elements_sequence(cls):
        """Return all element names of this element, preserving the original
        FHIR specification sequence order.
        """
        return ["id", "extension", "modifierExtension", "message", "situation"]

    @root_validator(pre=True, allow_reuse=True)
    def validate_required_primitive_elements_3479(
        cls, values: typing.Dict[str, typing.Any]
    ) -> typing.Dict[str, typing.Any]:
        """https://www.hl7.org/fhir/extensibility.html#Special-Case
        ``message`` has minimum cardinality 1; it may only be absent when a
        primitive extension (``_message``) with a non-empty extension list
        explains why the value is missing.
        """
        required_fields = [("message", "message__ext")]
        _missing = object()
        def _fallback():
            return ""
        errors: typing.List["ErrorWrapper"] = []
        for name, ext in required_fields:
            field = cls.__fields__[name]
            ext_field = cls.__fields__[ext]
            value = values.get(field.alias, _missing)
            if value not in (_missing, None):
                # primitive value supplied: nothing further to check
                continue
            ext_value = values.get(ext_field.alias, _missing)
            missing_ext = True
            if ext_value not in (_missing, None):
                if isinstance(ext_value, dict):
                    # raw dict form: a non-empty "extension" list excuses the missing value
                    missing_ext = len(ext_value.get("extension", [])) == 0
                elif (
                    getattr(ext_value.__class__, "get_resource_type", _fallback)()
                    == "FHIRPrimitiveExtension"
                ):
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
                else:
                    # unknown type: coerce through the extension field's own validators
                    validate_pass = True
                    for validator in ext_field.type_.__get_validators__():
                        try:
                            ext_value = validator(v=ext_value)
                        except ValidationError as exc:
                            errors.append(ErrorWrapper(exc, loc=ext_field.alias))
                            validate_pass = False
                    if not validate_pass:
                        continue
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
            if missing_ext:
                if value is _missing:
                    errors.append(ErrorWrapper(MissingError(), loc=field.alias))
                else:
                    errors.append(
                        ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
                    )
        if len(errors) > 0:
            raise ValidationError(errors, cls)  # type: ignore
        return values
class MessageDefinitionFocus(backboneelement.BackboneElement):
    """Backbone element of ``MessageDefinition``: a focal resource of the event.

    Declares the resource type that must be the focus of the message, an
    optional profile constraint, and the min/max cardinality of focal
    resources in a valid message.
    """

    # Fixed discriminator for this element type; ``const=True`` forbids overriding.
    resource_type = Field("MessageDefinitionFocus", const=True)

    code: fhirtypes.Code = Field(
        None,
        alias="code",
        title="Type of resource",
        description="The kind of resource that must be the focus for this message.",
        # if property is element of this resource.
        element_property=True,
        # required by the FHIR spec; enforced by the validator below.
        element_required=True,
    )
    code__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_code", title="Extension field for ``code``."
    )
    max: fhirtypes.String = Field(
        None,
        alias="max",
        title="Maximum number of focuses of this type",
        description=(
            "Identifies the maximum number of resources of this type that must be "
            "pointed to by a message in order for it to be valid against this "
            "MessageDefinition."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    max__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_max", title="Extension field for ``max``."
    )
    min: fhirtypes.UnsignedInt = Field(
        None,
        alias="min",
        title="Minimum number of focuses of this type",
        description=(
            "Identifies the minimum number of resources of this type that must be "
            "pointed to by a message in order for it to be valid against this "
            "MessageDefinition."
        ),
        # if property is element of this resource.
        element_property=True,
        # required by the FHIR spec; enforced by the validator below.
        element_required=True,
    )
    min__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_min", title="Extension field for ``min``."
    )
    profile: fhirtypes.Canonical = Field(
        None,
        alias="profile",
        title="Profile that must be adhered to by focus",
        description=(
            "A profile that reflects constraints for the focal resource (and "
            "potentially for related resources)."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["StructureDefinition"],
    )
    profile__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_profile", title="Extension field for ``profile``."
    )

    @classmethod
    def elements_sequence(cls):
        """Return all element names of this element, preserving the original
        FHIR specification sequence order.
        """
        return ["id", "extension", "modifierExtension", "code", "profile", "min", "max"]

    @root_validator(pre=True, allow_reuse=True)
    def validate_required_primitive_elements_2446(
        cls, values: typing.Dict[str, typing.Any]
    ) -> typing.Dict[str, typing.Any]:
        """https://www.hl7.org/fhir/extensibility.html#Special-Case
        ``code`` and ``min`` have minimum cardinality 1; they may only be
        absent when a primitive extension (``_code``/``_min``) with a
        non-empty extension list explains why the value is missing.
        """
        required_fields = [("code", "code__ext"), ("min", "min__ext")]
        _missing = object()
        def _fallback():
            return ""
        errors: typing.List["ErrorWrapper"] = []
        for name, ext in required_fields:
            field = cls.__fields__[name]
            ext_field = cls.__fields__[ext]
            value = values.get(field.alias, _missing)
            if value not in (_missing, None):
                # primitive value supplied: nothing further to check
                continue
            ext_value = values.get(ext_field.alias, _missing)
            missing_ext = True
            if ext_value not in (_missing, None):
                if isinstance(ext_value, dict):
                    # raw dict form: a non-empty "extension" list excuses the missing value
                    missing_ext = len(ext_value.get("extension", [])) == 0
                elif (
                    getattr(ext_value.__class__, "get_resource_type", _fallback)()
                    == "FHIRPrimitiveExtension"
                ):
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
                else:
                    # unknown type: coerce through the extension field's own validators
                    validate_pass = True
                    for validator in ext_field.type_.__get_validators__():
                        try:
                            ext_value = validator(v=ext_value)
                        except ValidationError as exc:
                            errors.append(ErrorWrapper(exc, loc=ext_field.alias))
                            validate_pass = False
                    if not validate_pass:
                        continue
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
            if missing_ext:
                if value is _missing:
                    errors.append(ErrorWrapper(MissingError(), loc=field.alias))
                else:
                    errors.append(
                        ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
                    )
        if len(errors) > 0:
            raise ValidationError(errors, cls)  # type: ignore
        return values
| true | true |
f73e4b6ba6d652e6d4ca99e210b18a9a20b14eea | 123,266 | py | Python | kubernetes_asyncio/client/api/batch_v2alpha1_api.py | opsani/kubernetes_asyncio | 55283bf6f3690e5c0a0c589cd752221511e2be51 | [
"Apache-2.0"
] | 196 | 2018-05-23T16:55:41.000Z | 2022-03-31T10:09:40.000Z | kubernetes_asyncio/client/api/batch_v2alpha1_api.py | tomplus/kubernetes_asyncio | e8c8686ec11be3a5295ae9d5d8728299492a61f8 | [
"Apache-2.0"
] | 164 | 2018-05-20T20:39:03.000Z | 2022-03-29T22:57:04.000Z | kubernetes_asyncio/client/api/batch_v2alpha1_api.py | opsani/kubernetes_asyncio | 55283bf6f3690e5c0a0c589cd752221511e2be51 | [
"Apache-2.0"
] | 41 | 2018-06-08T00:39:53.000Z | 2022-01-12T18:19:06.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.18.20
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from kubernetes_asyncio.client.api_client import ApiClient
from kubernetes_asyncio.client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class BatchV2alpha1Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_cron_job(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_cron_job # noqa: E501
create a CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_cron_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V2alpha1CronJob body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V2alpha1CronJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_cron_job_with_http_info(namespace, body, **kwargs) # noqa: E501
    def create_namespaced_cron_job_with_http_info(self, namespace, body, **kwargs):  # noqa: E501
        """create_namespaced_cron_job  # noqa: E501

        create a CronJob  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_cron_job_with_http_info(namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V2alpha1CronJob body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V2alpha1CronJob, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # reject any keyword argument that is not a known API or framework parameter
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_cron_job" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_cron_job`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_cron_job`")  # noqa: E501

        # assemble the request pieces handed to ApiClient.call_api below
        collection_formats = {}

        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501

        # only forward query parameters the caller actually supplied
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V2alpha1CronJob',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_collection_namespaced_cron_job(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_cron_job # noqa: E501
delete collection of CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_cron_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_namespaced_cron_job_with_http_info(namespace, **kwargs) # noqa: E501
def delete_collection_namespaced_cron_job_with_http_info(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_cron_job # noqa: E501
delete collection of CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_cron_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_cron_job" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_cron_job`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_cron_job(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_cron_job # noqa: E501
delete a CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_cron_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_namespaced_cron_job_with_http_info(name, namespace, **kwargs) # noqa: E501
def delete_namespaced_cron_job_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_cron_job # noqa: E501
delete a CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_cron_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'dry_run',
'grace_period_seconds',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_cron_job" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_cron_job`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_cron_job`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs): # noqa: E501
"""get_api_resources # noqa: E501
get available resources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
"""get_api_resources # noqa: E501
get available resources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/batch/v2alpha1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_cron_job_for_all_namespaces(self, **kwargs): # noqa: E501
"""list_cron_job_for_all_namespaces # noqa: E501
list or watch objects of kind CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_cron_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V2alpha1CronJobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_cron_job_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
def list_cron_job_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
"""list_cron_job_for_all_namespaces # noqa: E501
list or watch objects of kind CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_cron_job_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V2alpha1CronJobList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'pretty',
'resource_version',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_cron_job_for_all_namespaces" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/batch/v2alpha1/cronjobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2alpha1CronJobList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_cron_job(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_cron_job # noqa: E501
list or watch objects of kind CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_cron_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V2alpha1CronJobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_namespaced_cron_job_with_http_info(namespace, **kwargs) # noqa: E501
def list_namespaced_cron_job_with_http_info(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_cron_job # noqa: E501
list or watch objects of kind CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_cron_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V2alpha1CronJobList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_cron_job" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_cron_job`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2alpha1CronJobList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_cron_job(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_cron_job # noqa: E501
partially update the specified CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_cron_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V2alpha1CronJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def patch_namespaced_cron_job_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_cron_job  # noqa: E501

    partially update the specified CronJob  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_namespaced_cron_job_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
    :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V2alpha1CronJob, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: locals() is taken before any other local is assigned so the
    # snapshot holds exactly the declared parameters plus the raw kwargs dict.
    local_var_params = locals()

    # Endpoint-specific keyword arguments accepted by this method.
    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'force'
    ]
    # Framework-level options accepted by every generated endpoint.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten kwargs into the
    # parameter dict so all options can be looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_cron_job" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_cron_job`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_cron_job`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_cron_job`")  # noqa: E501

    collection_formats = {}

    # Substitutions for the {name}/{namespace} templates in the URL path.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (python snake_case -> API camelCase).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
        query_params.append(('force', local_var_params['force']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # HTTP header `Content-Type` (PATCH requests must declare the patch flavor)
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V2alpha1CronJob',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_namespaced_cron_job_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update the status subresource of the specified CronJob.  # noqa: E501

    The call is synchronous by default; pass ``async_req=True`` to receive a
    thread instead, whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: the patch document (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are not persisted; the only valid value is 'All' (process every dry-run stage).
    :param str field_manager: name associated with the actor or entity making these changes (required for apply-patch, optional otherwise).
    :param bool force: \"force\" Apply requests, re-acquiring fields owned by others; must be unset for non-apply patches.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a
                             (connection, read) tuple.
    :return: V2alpha1CronJob
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing it to return only
    # the deserialized body rather than the (data, status, headers) tuple.
    options = dict(kwargs, _return_http_data_only=True)
    return self.patch_namespaced_cron_job_status_with_http_info(
        name, namespace, body, **options)  # noqa: E501
def patch_namespaced_cron_job_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_cron_job_status  # noqa: E501

    partially update status of the specified CronJob  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_namespaced_cron_job_status_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
    :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V2alpha1CronJob, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: locals() is taken before any other local is assigned so the
    # snapshot holds exactly the declared parameters plus the raw kwargs dict.
    local_var_params = locals()

    # Endpoint-specific keyword arguments accepted by this method.
    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'force'
    ]
    # Framework-level options accepted by every generated endpoint.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten kwargs into the
    # parameter dict so all options can be looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_cron_job_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_cron_job_status`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_cron_job_status`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_cron_job_status`")  # noqa: E501

    collection_formats = {}

    # Substitutions for the {name}/{namespace} templates in the URL path.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (python snake_case -> API camelCase).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
        query_params.append(('force', local_var_params['force']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # HTTP header `Content-Type` (PATCH requests must declare the patch flavor)
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V2alpha1CronJob',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def read_namespaced_cron_job(self, name, namespace, **kwargs):  # noqa: E501
    """Read the specified CronJob.  # noqa: E501

    The call is synchronous by default; pass ``async_req=True`` to receive a
    thread instead, whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact, keeping cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
    :param bool export: Should this value be exported, stripping fields a user can not specify. Deprecated. Planned for removal in 1.18.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a
                             (connection, read) tuple.
    :return: V2alpha1CronJob
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing it to return only
    # the deserialized body rather than the (data, status, headers) tuple.
    options = dict(kwargs, _return_http_data_only=True)
    return self.read_namespaced_cron_job_with_http_info(
        name, namespace, **options)  # noqa: E501
def read_namespaced_cron_job_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_cron_job  # noqa: E501

    read the specified CronJob  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_cron_job_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
    :param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V2alpha1CronJob, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: locals() is taken before any other local is assigned so the
    # snapshot holds exactly the declared parameters plus the raw kwargs dict.
    local_var_params = locals()

    # Endpoint-specific keyword arguments accepted by this method.
    all_params = [
        'name',
        'namespace',
        'pretty',
        'exact',
        'export'
    ]
    # Framework-level options accepted by every generated endpoint.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten kwargs into the
    # parameter dict so all options can be looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_cron_job" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_cron_job`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_cron_job`")  # noqa: E501

    collection_formats = {}

    # Substitutions for the {name}/{namespace} templates in the URL path.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'exact' in local_var_params and local_var_params['exact'] is not None:  # noqa: E501
        query_params.append(('exact', local_var_params['exact']))  # noqa: E501
    if 'export' in local_var_params and local_var_params['export'] is not None:  # noqa: E501
        query_params.append(('export', local_var_params['export']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no request body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V2alpha1CronJob',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def read_namespaced_cron_job_status(self, name, namespace, **kwargs):  # noqa: E501
    """Read the status subresource of the specified CronJob.  # noqa: E501

    The call is synchronous by default; pass ``async_req=True`` to receive a
    thread instead, whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a
                             (connection, read) tuple.
    :return: V2alpha1CronJob
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing it to return only
    # the deserialized body rather than the (data, status, headers) tuple.
    options = dict(kwargs, _return_http_data_only=True)
    return self.read_namespaced_cron_job_status_with_http_info(
        name, namespace, **options)  # noqa: E501
def read_namespaced_cron_job_status_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_cron_job_status  # noqa: E501

    read status of the specified CronJob  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_cron_job_status_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V2alpha1CronJob, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: locals() is taken before any other local is assigned so the
    # snapshot holds exactly the declared parameters plus the raw kwargs dict.
    local_var_params = locals()

    # Endpoint-specific keyword arguments accepted by this method.
    all_params = [
        'name',
        'namespace',
        'pretty'
    ]
    # Framework-level options accepted by every generated endpoint.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten kwargs into the
    # parameter dict so all options can be looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_cron_job_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_cron_job_status`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_cron_job_status`")  # noqa: E501

    collection_formats = {}

    # Substitutions for the {name}/{namespace} templates in the URL path.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no request body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V2alpha1CronJob',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace_namespaced_cron_job(self, name, namespace, body, **kwargs):  # noqa: E501
    """Replace the specified CronJob.  # noqa: E501

    The call is synchronous by default; pass ``async_req=True`` to receive a
    thread instead, whose ``get()`` yields the result.

    :param async_req bool: execute request asynchronously
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V2alpha1CronJob body: the replacement object (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are not persisted; the only valid value is 'All' (process every dry-run stage).
    :param str field_manager: name associated with the actor or entity making these changes.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a
                             (connection, read) tuple.
    :return: V2alpha1CronJob
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing it to return only
    # the deserialized body rather than the (data, status, headers) tuple.
    options = dict(kwargs, _return_http_data_only=True)
    return self.replace_namespaced_cron_job_with_http_info(
        name, namespace, body, **options)  # noqa: E501
def replace_namespaced_cron_job_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_cron_job  # noqa: E501

    replace the specified CronJob  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_cron_job_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V2alpha1CronJob body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V2alpha1CronJob, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: locals() is taken before any other local is assigned so the
    # snapshot holds exactly the declared parameters plus the raw kwargs dict.
    local_var_params = locals()

    # Endpoint-specific keyword arguments accepted by this method.
    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager'
    ]
    # Framework-level options accepted by every generated endpoint.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten kwargs into the
    # parameter dict so all options can be looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_cron_job" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_cron_job`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_cron_job`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_cron_job`")  # noqa: E501

    collection_formats = {}

    # Substitutions for the {name}/{namespace} templates in the URL path.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (python snake_case -> API camelCase).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V2alpha1CronJob',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace_namespaced_cron_job_status(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_cron_job_status # noqa: E501
replace status of the specified CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_cron_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V2alpha1CronJob body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V2alpha1CronJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_cron_job_status_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def replace_namespaced_cron_job_status_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_cron_job_status # noqa: E501
replace status of the specified CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_cron_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V2alpha1CronJob body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V2alpha1CronJob, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_cron_job_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_cron_job_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_cron_job_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_cron_job_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2alpha1CronJob', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| 68.90218 | 1,390 | 0.665179 |
from __future__ import absolute_import
import re
import six
from kubernetes_asyncio.client.api_client import ApiClient
from kubernetes_asyncio.client.exceptions import (
ApiTypeError,
ApiValueError
)
class BatchV2alpha1Api(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_cron_job(self, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
return self.create_namespaced_cron_job_with_http_info(namespace, body, **kwargs)
def create_namespaced_cron_job_with_http_info(self, namespace, body, **kwargs):
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_cron_job" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('namespace' not in local_var_params or
local_var_params['namespace'] is None):
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_cron_job`")
if self.api_client.client_side_validation and ('body' not in local_var_params or
local_var_params['body'] is None):
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_cron_job`")
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace']
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
query_params.append(('pretty', local_var_params['pretty']))
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
query_params.append(('dryRun', local_var_params['dry_run']))
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
query_params.append(('fieldManager', local_var_params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2alpha1CronJob',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_cron_job(self, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
return self.delete_collection_namespaced_cron_job_with_http_info(namespace, **kwargs)
def delete_collection_namespaced_cron_job_with_http_info(self, namespace, **kwargs):
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_cron_job" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('namespace' not in local_var_params or
local_var_params['namespace'] is None):
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_cron_job`")
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace']
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
query_params.append(('pretty', local_var_params['pretty']))
if '_continue' in local_var_params and local_var_params['_continue'] is not None:
query_params.append(('continue', local_var_params['_continue']))
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
query_params.append(('dryRun', local_var_params['dry_run']))
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:
query_params.append(('fieldSelector', local_var_params['field_selector']))
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:
query_params.append(('labelSelector', local_var_params['label_selector']))
if 'limit' in local_var_params and local_var_params['limit'] is not None:
query_params.append(('limit', local_var_params['limit']))
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:
query_params.append(('orphanDependents', local_var_params['orphan_dependents']))
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:
query_params.append(('propagationPolicy', local_var_params['propagation_policy']))
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:
query_params.append(('resourceVersion', local_var_params['resource_version']))
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_cron_job(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
return self.delete_namespaced_cron_job_with_http_info(name, namespace, **kwargs)
def delete_namespaced_cron_job_with_http_info(self, name, namespace, **kwargs):
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'dry_run',
'grace_period_seconds',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_cron_job" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('name' not in local_var_params or
local_var_params['name'] is None):
raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_cron_job`")
if self.api_client.client_side_validation and ('namespace' not in local_var_params or
local_var_params['namespace'] is None):
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_cron_job`")
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name']
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace']
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
query_params.append(('pretty', local_var_params['pretty']))
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
query_params.append(('dryRun', local_var_params['dry_run']))
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None:
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds']))
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None:
query_params.append(('orphanDependents', local_var_params['orphan_dependents']))
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None:
query_params.append(('propagationPolicy', local_var_params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
kwargs['_return_http_data_only'] = True
return self.get_api_resources_with_http_info(**kwargs)
def get_api_resources_with_http_info(self, **kwargs):
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v2alpha1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_cron_job_for_all_namespaces(self, **kwargs):
kwargs['_return_http_data_only'] = True
return self.list_cron_job_for_all_namespaces_with_http_info(**kwargs)
def list_cron_job_for_all_namespaces_with_http_info(self, **kwargs):
local_var_params = locals()
all_params = [
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'pretty',
'resource_version',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_cron_job_for_all_namespaces" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))
if '_continue' in local_var_params and local_var_params['_continue'] is not None:
query_params.append(('continue', local_var_params['_continue']))
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:
query_params.append(('fieldSelector', local_var_params['field_selector']))
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:
query_params.append(('labelSelector', local_var_params['label_selector']))
if 'limit' in local_var_params and local_var_params['limit'] is not None:
query_params.append(('limit', local_var_params['limit']))
if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
query_params.append(('pretty', local_var_params['pretty']))
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:
query_params.append(('resourceVersion', local_var_params['resource_version']))
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))
if 'watch' in local_var_params and local_var_params['watch'] is not None:
query_params.append(('watch', local_var_params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v2alpha1/cronjobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2alpha1CronJobList',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_cron_job(self, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
return self.list_namespaced_cron_job_with_http_info(namespace, **kwargs)
def list_namespaced_cron_job_with_http_info(self, namespace, **kwargs):
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_cron_job" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('namespace' not in local_var_params or
local_var_params['namespace'] is None):
raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_cron_job`")
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace']
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
query_params.append(('pretty', local_var_params['pretty']))
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))
if '_continue' in local_var_params and local_var_params['_continue'] is not None:
query_params.append(('continue', local_var_params['_continue']))
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:
query_params.append(('fieldSelector', local_var_params['field_selector']))
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:
query_params.append(('labelSelector', local_var_params['label_selector']))
if 'limit' in local_var_params and local_var_params['limit'] is not None:
query_params.append(('limit', local_var_params['limit']))
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:
query_params.append(('resourceVersion', local_var_params['resource_version']))
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))
if 'watch' in local_var_params and local_var_params['watch'] is not None:
query_params.append(('watch', local_var_params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2alpha1CronJobList',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_cron_job(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs)
def patch_namespaced_cron_job_with_http_info(self, name, namespace, body, **kwargs):
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_cron_job" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('name' not in local_var_params or
local_var_params['name'] is None):
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_cron_job`")
if self.api_client.client_side_validation and ('namespace' not in local_var_params or
local_var_params['namespace'] is None):
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_cron_job`")
if self.api_client.client_side_validation and ('body' not in local_var_params or
local_var_params['body'] is None):
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_cron_job`")
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name']
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace']
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
query_params.append(('pretty', local_var_params['pretty']))
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
query_params.append(('dryRun', local_var_params['dry_run']))
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
query_params.append(('fieldManager', local_var_params['field_manager']))
if 'force' in local_var_params and local_var_params['force'] is not None:
query_params.append(('force', local_var_params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2alpha1CronJob',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_cron_job_status(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_cron_job_status_with_http_info(name, namespace, body, **kwargs)
def patch_namespaced_cron_job_status_with_http_info(self, name, namespace, body, **kwargs):
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_cron_job_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('name' not in local_var_params or
local_var_params['name'] is None):
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_cron_job_status`")
if self.api_client.client_side_validation and ('namespace' not in local_var_params or
local_var_params['namespace'] is None):
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_cron_job_status`")
if self.api_client.client_side_validation and ('body' not in local_var_params or
local_var_params['body'] is None):
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_cron_job_status`")
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name']
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace']
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
query_params.append(('pretty', local_var_params['pretty']))
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
query_params.append(('dryRun', local_var_params['dry_run']))
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
query_params.append(('fieldManager', local_var_params['field_manager']))
if 'force' in local_var_params and local_var_params['force'] is not None:
query_params.append(('force', local_var_params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml'])
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2alpha1CronJob',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_cron_job(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
return self.read_namespaced_cron_job_with_http_info(name, namespace, **kwargs)
    def read_namespaced_cron_job_with_http_info(self, name, namespace, **kwargs):
        """GET /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}.

        :param name: name of the CronJob (required).
        :param namespace: object name and auth scope (required).
        :param kwargs: optional query params ``pretty``, ``exact``, ``export``
            plus client plumbing flags (``async_req``,
            ``_return_http_data_only``, ``_preload_content``,
            ``_request_timeout``).
        :return: ``V2alpha1CronJob`` response via ``api_client.call_api``.
        :raises ApiTypeError: on an unexpected keyword argument.
        :raises ApiValueError: when a required parameter is missing.
        """
        # Snapshot of the call's arguments; accepted kwargs are folded in below.
        local_var_params = locals()
        all_params = [
            'name',
            'namespace',
            'pretty',
            'exact',
            'export'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown kwargs, then flatten the rest into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_cron_job" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Client-side validation of required path parameters.
        if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                       local_var_params['name'] is None):
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_cron_job`")
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or
                                                       local_var_params['namespace'] is None):
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_cron_job`")
        collection_formats = {}
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
            query_params.append(('pretty', local_var_params['pretty']))
        if 'exact' in local_var_params and local_var_params['exact'] is not None:
            query_params.append(('exact', local_var_params['exact']))
        if 'export' in local_var_params and local_var_params['export'] is not None:
            query_params.append(('export', local_var_params['export']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # Response content types the client is willing to accept.
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        auth_settings = ['BearerToken']
        return self.api_client.call_api(
            '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V2alpha1CronJob',
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def read_namespaced_cron_job_status(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
return self.read_namespaced_cron_job_status_with_http_info(name, namespace, **kwargs)
    def read_namespaced_cron_job_status_with_http_info(self, name, namespace, **kwargs):
        """GET /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status.

        :param name: name of the CronJob (required).
        :param namespace: object name and auth scope (required).
        :param kwargs: optional ``pretty`` query param plus client plumbing
            flags (``async_req``, ``_return_http_data_only``,
            ``_preload_content``, ``_request_timeout``).
        :return: ``V2alpha1CronJob`` response via ``api_client.call_api``.
        :raises ApiTypeError: on an unexpected keyword argument.
        :raises ApiValueError: when a required parameter is missing.
        """
        # Snapshot of the call's arguments; accepted kwargs are folded in below.
        local_var_params = locals()
        all_params = [
            'name',
            'namespace',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_cron_job_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Client-side validation of required path parameters.
        if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                       local_var_params['name'] is None):
            raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_cron_job_status`")
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or
                                                       local_var_params['namespace'] is None):
            raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_cron_job_status`")
        collection_formats = {}
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
            query_params.append(('pretty', local_var_params['pretty']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        auth_settings = ['BearerToken']
        return self.api_client.call_api(
            '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V2alpha1CronJob',
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def replace_namespaced_cron_job(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs)
    def replace_namespaced_cron_job_with_http_info(self, name, namespace, body, **kwargs):
        """PUT /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}.

        :param name: name of the CronJob (required).
        :param namespace: object name and auth scope (required).
        :param body: replacement ``V2alpha1CronJob`` object (required).
        :param kwargs: optional ``pretty``, ``dry_run``, ``field_manager``
            query params plus client plumbing flags (``async_req``,
            ``_return_http_data_only``, ``_preload_content``,
            ``_request_timeout``).
        :return: ``V2alpha1CronJob`` response via ``api_client.call_api``.
        :raises ApiTypeError: on an unexpected keyword argument.
        :raises ApiValueError: when a required parameter is missing.
        """
        # Snapshot of the call's arguments; accepted kwargs are folded in below.
        local_var_params = locals()
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_namespaced_cron_job" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Client-side validation of required parameters.
        if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                       local_var_params['name'] is None):
            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_cron_job`")
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or
                                                       local_var_params['namespace'] is None):
            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_cron_job`")
        if self.api_client.client_side_validation and ('body' not in local_var_params or
                                                       local_var_params['body'] is None):
            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_cron_job`")
        collection_formats = {}
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
            query_params.append(('pretty', local_var_params['pretty']))
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
            query_params.append(('dryRun', local_var_params['dry_run']))
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
            query_params.append(('fieldManager', local_var_params['field_manager']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        auth_settings = ['BearerToken']
        return self.api_client.call_api(
            '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V2alpha1CronJob',
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def replace_namespaced_cron_job_status(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_cron_job_status_with_http_info(name, namespace, body, **kwargs)
    def replace_namespaced_cron_job_status_with_http_info(self, name, namespace, body, **kwargs):
        """PUT /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status.

        :param name: name of the CronJob (required).
        :param namespace: object name and auth scope (required).
        :param body: replacement ``V2alpha1CronJob`` object (required).
        :param kwargs: optional ``pretty``, ``dry_run``, ``field_manager``
            query params plus client plumbing flags (``async_req``,
            ``_return_http_data_only``, ``_preload_content``,
            ``_request_timeout``).
        :return: ``V2alpha1CronJob`` response via ``api_client.call_api``.
        :raises ApiTypeError: on an unexpected keyword argument.
        :raises ApiValueError: when a required parameter is missing.
        """
        # Snapshot of the call's arguments; accepted kwargs are folded in below.
        local_var_params = locals()
        all_params = [
            'name',
            'namespace',
            'body',
            'pretty',
            'dry_run',
            'field_manager'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_namespaced_cron_job_status" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Client-side validation of required parameters.
        if self.api_client.client_side_validation and ('name' not in local_var_params or
                                                       local_var_params['name'] is None):
            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_cron_job_status`")
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or
                                                       local_var_params['namespace'] is None):
            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_cron_job_status`")
        if self.api_client.client_side_validation and ('body' not in local_var_params or
                                                       local_var_params['body'] is None):
            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_cron_job_status`")
        collection_formats = {}
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:
            query_params.append(('pretty', local_var_params['pretty']))
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:
            query_params.append(('dryRun', local_var_params['dry_run']))
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:
            query_params.append(('fieldManager', local_var_params['field_manager']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        auth_settings = ['BearerToken']
        return self.api_client.call_api(
            '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V2alpha1CronJob',
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
| true | true |
f73e4c28bf6bf53148217941a2d246a92baf3f8f | 2,263 | py | Python | scripts/insert_api_refs.py | mpolson64/Ax-1 | cf9e12cc1253efe0fc893f2620e99337e0927a26 | [
"MIT"
] | 1 | 2022-02-10T10:51:40.000Z | 2022-02-10T10:51:40.000Z | scripts/insert_api_refs.py | mpolson64/Ax-1 | cf9e12cc1253efe0fc893f2620e99337e0927a26 | [
"MIT"
] | null | null | null | scripts/insert_api_refs.py | mpolson64/Ax-1 | cf9e12cc1253efe0fc893f2620e99337e0927a26 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import ast
import glob
import re
def list_functions(source_glob):
    """Collect top-level functions and classes defined in Python sources.

    :param source_glob: glob pattern matching ``.py`` files; ``**`` patterns
        now recurse (the original call omitted ``recursive=True``, so
        ``src/**/*.py`` only matched one directory level).
    :return: list of ``(name, dotted_path)`` tuples where ``dotted_path`` is
        the module path (file path with ``/`` replaced by ``.``) plus the name.
    """
    defined = []
    for sp in glob.glob(source_glob, recursive=True):
        # Derive the dotted module path from the file path ("a/b.py" -> "a.b").
        module_name = sp[:-3].replace("/", ".")
        # Use a context manager so the file handle is closed deterministically
        # (the original `open(sp).read()` leaked the handle).
        with open(sp) as fh:
            node = ast.parse(fh.read())
        # Only top-level defs/classes; nested definitions are not API anchors.
        defined.extend(
            (n.name, module_name + "." + n.name)
            for n in node.body
            if isinstance(n, (ast.FunctionDef, ast.ClassDef))
        )
    return defined
def replace_backticks(source_path, docs_path):
    """Link backtick-quoted API names in markdown docs to the API reference.

    Rewrites each ``.md`` file under *docs_path* in place, replacing
    `` `name` `` mentions of objects found under *source_path* with markdown
    links into the generated API docs. Each replacement is logged to stdout.

    :param source_path: root of the Python sources (e.g. ``'ax/'``).
    :param docs_path: directory containing the ``.md`` docs to rewrite.
    """
    markdown_glob = docs_path + "/*.md"
    source_glob = source_path + "/**/*.py"
    methods = list_functions(source_glob)
    for f in glob.glob(markdown_glob):
        for n, m in methods:
            # Negative lookbehind: skip names that are already inside a link
            # ("[`name`](...)") or part of a longer backticked token.
            pattern = "(?<![[`])(`" + n + "`)"
            link = f"[`{n}`](/api/{m.split('.')[1]}.html#{m})"
            # Context managers close the handles (the original leaked both
            # the read and the write handle).
            with open(f) as fh:
                lines = fh.readlines()
            changed = False
            for i, l in enumerate(lines):
                match = re.search(pattern, l)
                if match:
                    print(f"{f}:{i+1} s/{match.group(0)}/{link}")
                    lines[i] = re.sub(pattern, link, l)
                    changed = True
            # Only rewrite the file when something actually changed; the
            # original rewrote every file for every method, changed or not.
            if changed:
                with open(f, "w") as fh:
                    fh.writelines(lines)
if __name__ == "__main__":
    # CLI entry point: parse the two required paths and run the rewrite.
    cli = argparse.ArgumentParser(
        description="""In markdown docs, replace backtick-quoted names of
        objects exported from Ax with links to the API docs."""
    )
    cli.add_argument(
        "--source_path",
        metavar="source_path",
        required=True,
        help="Path to source files (e.g. 'ax/').",
    )
    cli.add_argument(
        "--docs_path", type=str, required=True, help="Path to docs (e.g. 'docs/'."
    )
    parsed = cli.parse_args()
    replace_backticks(parsed.source_path, parsed.docs_path)
| 33.279412 | 82 | 0.588157 |
# NOTE(review): the region below appears to be a comment-stripped duplicate of
# the script above (dataset-join residue); its definitions shadow the earlier
# ones of the same names.
import argparse
import ast
import glob
import re
# Collect (name, dotted_module_path.name) for top-level defs/classes.
def list_functions(source_glob):
    defined = []
    for sp in glob.glob(source_glob):
        module_name = sp[:-3]
        module_name = module_name.replace("/", ".")
        node = ast.parse(open(sp).read())
        defined.extend(
            (n.name, module_name + "." + n.name)
            for n in node.body
            if (isinstance(n, ast.FunctionDef) or isinstance(n, ast.ClassDef))
        )
    return defined
# Rewrite markdown files in place, turning `name` mentions into API links.
def replace_backticks(source_path, docs_path):
    markdown_glob = docs_path + "/*.md"
    source_glob = source_path + "/**/*.py"
    methods = list_functions(source_glob)
    for f in glob.glob(markdown_glob):
        for n, m in methods:
            pattern = "(?<![[`])(`" + n + "`)"
            link = f"[`{n}`](/api/{m.split('.')[1]}.html#{m})"
            lines = open(f).readlines()
            for i, l in enumerate(lines):
                match = re.search(pattern, l)
                if match:
                    print(f"{f}:{i+1} s/{match.group(0)}/{link}")
                    lines[i] = re.sub(pattern, link, l)
            open(f, "w").writelines(lines)
# CLI entry point.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="""In markdown docs, replace backtick-quoted names of
        objects exported from Ax with links to the API docs."""
    )
    parser.add_argument(
        "--source_path",
        metavar="source_path",
        required=True,
        help="Path to source files (e.g. 'ax/').",
    )
    parser.add_argument(
        "--docs_path", type=str, required=True, help="Path to docs (e.g. 'docs/'."
    )
    args = parser.parse_args()
    replace_backticks(args.source_path, args.docs_path)
| true | true |
f73e4d16b2cd8fa1026b82af5d125e7099e43365 | 9,695 | py | Python | siteprefs/utils.py | idlesign/django-siteprefs | dbc040b96800a73e35a3d436a5207dd658ce0c58 | [
"BSD-3-Clause"
] | 8 | 2015-01-30T11:57:45.000Z | 2021-11-07T01:21:05.000Z | siteprefs/utils.py | idlesign/django-siteprefs | dbc040b96800a73e35a3d436a5207dd658ce0c58 | [
"BSD-3-Clause"
] | 17 | 2015-03-28T18:26:40.000Z | 2020-06-05T04:35:15.000Z | siteprefs/utils.py | idlesign/django-siteprefs | dbc040b96800a73e35a3d436a5207dd658ce0c58 | [
"BSD-3-Clause"
] | 7 | 2015-03-29T10:06:14.000Z | 2020-05-29T05:22:39.000Z |
import inspect
import os
from collections import OrderedDict
from datetime import datetime
from typing import Any, Callable, Type, Generator, Tuple
from warnings import warn
from django.contrib import admin
from django.db import models
from django.utils.translation import gettext_lazy as _
from etc.toolbox import import_app_module, import_project_modules
from .settings import PREFS_MODULE_NAME
from .signals import prefs_save
class Frame:
    """Context manager exposing a stack frame a fixed number of levels up.

    Usage::

        with Frame(stepback=1) as frame:
            ...  # e.g. inspect frame.f_locals

    """
    def __init__(self, stepback: int = 0):
        self.depth = stepback
    def __enter__(self):
        # Start from the frame that entered the ``with`` block, then walk
        # ``depth`` additional levels up the call stack.
        current = inspect.currentframe().f_back
        for _ in range(self.depth):
            current = current.f_back
        self.frame = current
        return self.frame
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Drop the reference so the frame (and its locals) can be collected.
        del self.frame
class PatchedLocal:
    """Temporary stand-in for a module-level variable treated as a preference.

    Simply records the variable name and its value so the real object can be
    restored (or replaced by a PrefProxy) later.
    """
    def __init__(self, key: str, val: Any):
        self.key, self.val = key, val
class Mimic:
    """Mimics other types by implementation of various special methods.
    This one is deprecated in favor of settings module proxying (proxy_settings_module()).
    """
    # The wrapped value that every special method delegates to. Subclasses
    # (e.g. PrefProxy) override this with a property.
    value: Any = None
    def __call__(self, *args, **kwargs):
        return self.value
    def __str__(self):
        return self.value.__str__()
    # Conversions delegate to the wrapped value.
    def __bool__(self):
        return bool(self.value)
    def __int__(self):
        return int(self.value)
    def __float__(self):
        return float(self.value)
    # Container protocol.
    def __len__(self):
        return self.value.__len__()
    def __contains__(self, item):
        return self.value.__contains__(item)
    # Arithmetic (including reflected variants) delegates directly to the
    # wrapped value's own special methods.
    def __sub__(self, other):
        return self.value.__sub__(other)
    def __rsub__(self, other):
        return self.value.__rsub__(other)
    def __add__(self, other):
        return self.value.__add__(other)
    def __radd__(self, other):
        return self.value.__radd__(other)
    def __mul__(self, other):
        return self.value.__mul__(other)
    def __rmul__(self, other):
        return self.value.__rmul__(other)
    # Rich comparisons. NOTE: defining __eq__ makes instances unhashable
    # by default (implicit __hash__ = None).
    def __lt__(self, other):
        return self.value.__lt__(other)
    def __le__(self, other):
        return self.value.__le__(other)
    def __gt__(self, other):
        return self.value.__gt__(other)
    def __ge__(self, other):
        return self.value.__ge__(other)
    def __eq__(self, other):
        return self.value.__eq__(other)
    def __ne__(self, other):
        return self.value.__ne__(other)
class PrefProxy(Mimic):
    """Objects of this class replace app preferences.

    A proxy mimics its current value (via Mimic's delegating special methods)
    while also carrying the metadata needed to build a model field and render
    the preference in the admin.
    """
    def __init__(
            self,
            name: str,
            default: Any,
            category: str = None,
            field: models.Field = None,
            verbose_name: str = None,
            help_text: str = '',
            static: bool = True,
            readonly: bool = False
    ):
        """
        :param name: Preference name.
        :param default: Default (initial) value.
        :param category: Category name the preference belongs to.
        :param field: Django model field to represent this preference.
        :param verbose_name: Field verbose name.
        :param help_text: Field help text.
        :param static: Leave this preference static (do not store in DB).
        :param readonly: Make this field read only.
        """
        self.name = name
        self.category = category
        self.default = default
        self.static = static
        self.help_text = help_text
        # Static preferences are never persisted, so they are forced read-only.
        if static:
            readonly = True
        self.readonly = readonly
        if verbose_name is None:
            verbose_name = name.replace('_', ' ').capitalize()
        self.verbose_name = verbose_name
        if field is None:
            # Derive a model field from the type of the default value.
            self.field = get_field_for_proxy(self)
        else:
            self.field = field
            update_field_from_proxy(self.field, self)
    @property
    def value(self) -> Any:
        # Non-static prefs may carry a DB-loaded value in ``db_value``
        # (set externally); fall back to the default when absent.
        if self.static:
            val = self.default
        else:
            try:
                val = getattr(self, 'db_value')
            except AttributeError:
                val = self.default
        return self.field.to_python(val)
    def get_value(self) -> Any:
        """Deprecated alias for the ``value`` property."""
        warn('Please use .value instead .get_value().', DeprecationWarning, stacklevel=2)
        return self.value
    def __repr__(self):
        return f'{self.name} = {self.value}'
def get_field_for_proxy(pref_proxy: PrefProxy) -> models.Field:
    """Create a model field instance matching a PrefProxy's default value type.

    Falls back to ``TextField`` for types without a dedicated mapping.

    :param pref_proxy: proxy whose ``default`` drives the field type choice.
    """
    type_map = {
        bool: models.BooleanField,
        int: models.IntegerField,
        float: models.FloatField,
        datetime: models.DateTimeField,
    }
    field_cls = type_map.get(type(pref_proxy.default), models.TextField)
    field = field_cls()
    # Copy verbose_name/help_text/default onto the fresh field.
    update_field_from_proxy(field, pref_proxy)
    return field
def update_field_from_proxy(field_obj: models.Field, pref_proxy: PrefProxy):
    """Copy display/default metadata from a PrefProxy onto a model field.

    :param field_obj: target Django field instance (mutated in place).
    :param pref_proxy: source preference proxy.
    """
    for attr_name in ('verbose_name', 'help_text', 'default'):
        setattr(field_obj, attr_name, getattr(pref_proxy, attr_name))
def get_pref_model_class(app: str, prefs: dict, get_prefs_func: Callable) -> Type[models.Model]:
    """Returns preferences model class dynamically created for a given app or None on conflict."""
    module = f'{app}.{PREFS_MODULE_NAME}'
    # Unmanaged model: it only backs the admin UI, no DB table is created.
    model_dict = {
        '_prefs_app': app,
        '_get_prefs': staticmethod(get_prefs_func),
        '__module__': module,
        'Meta': type('Meta', (models.options.Options,), {
            'verbose_name': _('Preference'),
            'verbose_name_plural': _('Preferences'),
            'app_label': app,
            'managed': False,
        })
    }
    # One model field per preference, taken from each PrefProxy.
    for field_name, val_proxy in prefs.items():
        model_dict[field_name] = val_proxy.field
    model = type('Preferences', (models.Model,), model_dict)
    def fake_save_base(self, *args, **kwargs):
        # Intercepts saving: instead of hitting the DB, push the edited
        # values back onto the PrefProxy objects and notify listeners
        # through the ``prefs_save`` signal.
        updated_prefs = {
            f.name: getattr(self, f.name)
            for f in self._meta.fields
            if not isinstance(f, models.fields.AutoField)
        }
        app_prefs = self._get_prefs(self._prefs_app)
        for pref in app_prefs.keys():
            if pref in updated_prefs:
                app_prefs[pref].db_value = updated_prefs[pref]
        self.pk = self._prefs_app  # Make Django 1.7 happy.
        prefs_save.send(sender=self, app=self._prefs_app, updated_prefs=updated_prefs)
        return True
    model.save_base = fake_save_base
    return model
def get_pref_model_admin_class(prefs: dict) -> Type[admin.ModelAdmin]:
    """Build a ModelAdmin class for the dynamic preferences model.

    Groups fields into fieldsets by preference category, marks read-only
    preferences, and disables add/delete so the admin behaves as a
    single editable settings page.

    :param prefs: mapping of field name to PrefProxy.
    """
    by_category = OrderedDict()
    readonly_fields = []
    for field_name, val_proxy in prefs.items():
        if val_proxy.readonly:
            readonly_fields.append(field_name)
        if val_proxy.category not in by_category:
            by_category[val_proxy.category] = []
        by_category[val_proxy.category].append(field_name)
    # Preferences can only be edited, never added or removed via the admin.
    cl_model_admin_dict = {
        'has_add_permission': lambda *args: False,
        'has_delete_permission': lambda *args: False
    }
    if readonly_fields:
        cl_model_admin_dict['readonly_fields'] = readonly_fields
    fieldsets = []
    for category, cat_prefs in by_category.items():
        fieldsets.append((category, {'fields': cat_prefs}))
    if fieldsets:
        cl_model_admin_dict['fieldsets'] = fieldsets
    model = type('PreferencesAdmin', (admin.ModelAdmin,), cl_model_admin_dict)
    # The change list is replaced by a single change view (one settings page).
    model.changelist_view = lambda self, request, **kwargs: self.change_view(request, '', **kwargs)
    # Materialize a model instance from the current preference values.
    model.get_object = lambda self, *args: (
        self.model(
            **{
                field_name: val_proxy.get_value() for field_name, val_proxy in
                self.model._get_prefs(self.model._prefs_app).items()
            }
        )
    )
    return model
def get_frame_locals(stepback: int = 0) -> dict:
    """Return the ``f_locals`` mapping of a frame up the call stack.

    :param stepback: number of frames to step up from this function's own
        frame (0 returns this function's locals).
    """
    with Frame(stepback=stepback) as frame:
        return frame.f_locals
def traverse_local_prefs(stepback: int = 0) -> Generator[Tuple[str, dict], None, None]:
    """Yield ``(name, locals_dict)`` for variables considered preferences.

    Preferences are ALL_CAPS names without a leading underscore found in the
    locals of a frame up the call stack.

    :param stepback: frames to step up, relative to this function's caller.
    """
    locals_dict = get_frame_locals(stepback + 1)
    for var_name in locals_dict:
        if var_name.upper() == var_name and not var_name.startswith('_'):
            yield var_name, locals_dict
def import_module(package: str, module_name: str):
    """Imports a module from a given package.

    Thin wrapper around ``etc.toolbox.import_app_module``.

    :param package: dotted path of the package to import from.
    :param module_name: name of the module to import (e.g. the prefs module).
    """
    import_app_module(package, module_name)
def import_prefs():
    """Imports preferences modules from packages (apps) and project root."""
    # settings.py locals if autodiscover_siteprefs() is in urls.py
    # NOTE(review): stepback=3 assumes a fixed call depth from settings.py —
    # confirm if the call chain ever changes.
    settings_locals = get_frame_locals(3)
    if 'self' not in settings_locals:  # If not SiteprefsConfig.ready()
        # Try to import project-wide prefs.
        project_package = settings_locals['__package__']  # Expected project layout introduced in Django 1.4
        if not project_package:
            # Fallback to old layout.
            project_package = os.path.split(os.path.dirname(settings_locals['__file__']))[-1]
        import_module(project_package, PREFS_MODULE_NAME)
    import_project_modules(PREFS_MODULE_NAME)
| 25.580475 | 108 | 0.633213 |
# NOTE(review): the region below appears to be a comment-stripped duplicate of
# the module above (dataset-join residue); its definitions shadow the earlier
# ones of the same names.
import inspect
import os
from collections import OrderedDict
from datetime import datetime
from typing import Any, Callable, Type, Generator, Tuple
from warnings import warn
from django.contrib import admin
from django.db import models
from django.utils.translation import gettext_lazy as _
from etc.toolbox import import_app_module, import_project_modules
from .settings import PREFS_MODULE_NAME
from .signals import prefs_save
# Context manager exposing a stack frame a fixed number of levels up.
class Frame:
    def __init__(self, stepback: int = 0):
        self.depth = stepback
    def __enter__(self):
        frame = inspect.currentframe().f_back
        for __ in range(self.depth):
            frame = frame.f_back
        self.frame = frame
        return self.frame
    def __exit__(self, exc_type, exc_val, exc_tb):
        del self.frame
# Temporary stand-in for a module-level variable considered a preference.
class PatchedLocal:
    def __init__(self, key: str, val: Any):
        self.key = key
        self.val = val
# Delegates special methods to ``value`` so the proxy mimics other types.
class Mimic:
    value: Any = None
    def __call__(self, *args, **kwargs):
        return self.value
    def __str__(self):
        return self.value.__str__()
    def __bool__(self):
        return bool(self.value)
    def __int__(self):
        return int(self.value)
    def __float__(self):
        return float(self.value)
    def __len__(self):
        return self.value.__len__()
    def __contains__(self, item):
        return self.value.__contains__(item)
    def __sub__(self, other):
        return self.value.__sub__(other)
    def __rsub__(self, other):
        return self.value.__rsub__(other)
    def __add__(self, other):
        return self.value.__add__(other)
    def __radd__(self, other):
        return self.value.__radd__(other)
    def __mul__(self, other):
        return self.value.__mul__(other)
    def __rmul__(self, other):
        return self.value.__rmul__(other)
    def __lt__(self, other):
        return self.value.__lt__(other)
    def __le__(self, other):
        return self.value.__le__(other)
    def __gt__(self, other):
        return self.value.__gt__(other)
    def __ge__(self, other):
        return self.value.__ge__(other)
    def __eq__(self, other):
        return self.value.__eq__(other)
    def __ne__(self, other):
        return self.value.__ne__(other)
# Replaces an app preference: mimics its value and carries field metadata.
class PrefProxy(Mimic):
    def __init__(
            self,
            name: str,
            default: Any,
            category: str = None,
            field: models.Field = None,
            verbose_name: str = None,
            help_text: str = '',
            static: bool = True,
            readonly: bool = False
    ):
        self.name = name
        self.category = category
        self.default = default
        self.static = static
        self.help_text = help_text
        if static:
            readonly = True
        self.readonly = readonly
        if verbose_name is None:
            verbose_name = name.replace('_', ' ').capitalize()
        self.verbose_name = verbose_name
        if field is None:
            self.field = get_field_for_proxy(self)
        else:
            self.field = field
            update_field_from_proxy(self.field, self)
    @property
    def value(self) -> Any:
        if self.static:
            val = self.default
        else:
            try:
                val = getattr(self, 'db_value')
            except AttributeError:
                val = self.default
        return self.field.to_python(val)
    def get_value(self) -> Any:
        warn('Please use .value instead .get_value().', DeprecationWarning, stacklevel=2)
        return self.value
    def __repr__(self):
        return f'{self.name} = {self.value}'
# Build a model field instance matching the proxy's default value type.
def get_field_for_proxy(pref_proxy: PrefProxy) -> models.Field:
    field = {
        bool: models.BooleanField,
        int: models.IntegerField,
        float: models.FloatField,
        datetime: models.DateTimeField,
    }.get(type(pref_proxy.default), models.TextField)()
    update_field_from_proxy(field, pref_proxy)
    return field
# Copy verbose_name/help_text/default from a proxy onto a model field.
def update_field_from_proxy(field_obj: models.Field, pref_proxy: PrefProxy):
    attr_names = ('verbose_name', 'help_text', 'default')
    for attr_name in attr_names:
        setattr(field_obj, attr_name, getattr(pref_proxy, attr_name))
# Dynamically create an unmanaged Preferences model for a given app.
def get_pref_model_class(app: str, prefs: dict, get_prefs_func: Callable) -> Type[models.Model]:
    module = f'{app}.{PREFS_MODULE_NAME}'
    model_dict = {
        '_prefs_app': app,
        '_get_prefs': staticmethod(get_prefs_func),
        '__module__': module,
        'Meta': type('Meta', (models.options.Options,), {
            'verbose_name': _('Preference'),
            'verbose_name_plural': _('Preferences'),
            'app_label': app,
            'managed': False,
        })
    }
    for field_name, val_proxy in prefs.items():
        model_dict[field_name] = val_proxy.field
    model = type('Preferences', (models.Model,), model_dict)
    # Saving pushes values onto the proxies and emits prefs_save; no DB write.
    def fake_save_base(self, *args, **kwargs):
        updated_prefs = {
            f.name: getattr(self, f.name)
            for f in self._meta.fields
            if not isinstance(f, models.fields.AutoField)
        }
        app_prefs = self._get_prefs(self._prefs_app)
        for pref in app_prefs.keys():
            if pref in updated_prefs:
                app_prefs[pref].db_value = updated_prefs[pref]
        self.pk = self._prefs_app
        prefs_save.send(sender=self, app=self._prefs_app, updated_prefs=updated_prefs)
        return True
    model.save_base = fake_save_base
    return model
# Build the ModelAdmin: category fieldsets, read-only fields, no add/delete.
def get_pref_model_admin_class(prefs: dict) -> Type[admin.ModelAdmin]:
    by_category = OrderedDict()
    readonly_fields = []
    for field_name, val_proxy in prefs.items():
        if val_proxy.readonly:
            readonly_fields.append(field_name)
        if val_proxy.category not in by_category:
            by_category[val_proxy.category] = []
        by_category[val_proxy.category].append(field_name)
    cl_model_admin_dict = {
        'has_add_permission': lambda *args: False,
        'has_delete_permission': lambda *args: False
    }
    if readonly_fields:
        cl_model_admin_dict['readonly_fields'] = readonly_fields
    fieldsets = []
    for category, cat_prefs in by_category.items():
        fieldsets.append((category, {'fields': cat_prefs}))
    if fieldsets:
        cl_model_admin_dict['fieldsets'] = fieldsets
    model = type('PreferencesAdmin', (admin.ModelAdmin,), cl_model_admin_dict)
    model.changelist_view = lambda self, request, **kwargs: self.change_view(request, '', **kwargs)
    model.get_object = lambda self, *args: (
        self.model(
            **{
                field_name: val_proxy.get_value() for field_name, val_proxy in
                self.model._get_prefs(self.model._prefs_app).items()
            }
        )
    )
    return model
# Return the f_locals of a frame up the call stack.
def get_frame_locals(stepback: int = 0) -> dict:
    with Frame(stepback=stepback) as frame:
        locals_dict = frame.f_locals
    return locals_dict
# Yield (name, locals_dict) for ALL_CAPS, non-underscore-prefixed variables.
def traverse_local_prefs(stepback: int = 0) -> Generator[Tuple[str, dict], None, None]:
    locals_dict = get_frame_locals(stepback+1)
    for k in locals_dict:
        if not k.startswith('_') and k.upper() == k:
            yield k, locals_dict
# Thin wrapper around etc.toolbox.import_app_module.
def import_module(package: str, module_name: str):
    import_app_module(package, module_name)
# Import prefs modules from the project root and all installed apps.
def import_prefs():
    settings_locals = get_frame_locals(3)
    if 'self' not in settings_locals:
        project_package = settings_locals['__package__']
        if not project_package:
            project_package = os.path.split(os.path.dirname(settings_locals['__file__']))[-1]
        import_module(project_package, PREFS_MODULE_NAME)
    import_project_modules(PREFS_MODULE_NAME)
| true | true |
f73e4e29c974bf5d8bdee323ccc96a8aac191964 | 13,742 | py | Python | src/transformers/modeling_tf_performer_attention.py | TwinMooon/transformers-plus-performers | c17d6473deb5316363f60bb2ddd1007d4364abe4 | [
"Apache-2.0"
] | null | null | null | src/transformers/modeling_tf_performer_attention.py | TwinMooon/transformers-plus-performers | c17d6473deb5316363f60bb2ddd1007d4364abe4 | [
"Apache-2.0"
] | null | null | null | src/transformers/modeling_tf_performer_attention.py | TwinMooon/transformers-plus-performers | c17d6473deb5316363f60bb2ddd1007d4364abe4 | [
"Apache-2.0"
] | 1 | 2021-12-15T00:23:27.000Z | 2021-12-15T00:23:27.000Z | from typing import Optional, Union
import logging
import numpy as np
import tensorflow as tf
from .configuration_performer_attention import PerformerAttentionConfig
from .modeling_utils import (
find_pruneable_heads_and_indices,
prune_linear_layer
)
# Feature-map functions for FAVOR+ attention, keyed by config kernel_type.
# `x` is the projected input; `h` is the stabilizer term added for numerical
# stability of the exponential kernels.
KERNEL_CALLABLES = {
    # Fixed: tf.concat takes `axis`, not `dim` (a PyTorch keyword left over
    # from the port) — the original raised TypeError when 'cosh' was used.
    'cosh': lambda x, h: tf.concat((tf.exp(h + x), tf.exp(h - x)), axis=-1),
    'exp': lambda x, h: tf.exp(h + x),  # Default
    'elu': lambda x: tf.nn.elu(x) + 1,
    'relu': tf.nn.relu
}
# Policies deciding whether to fall back to exact softmax attention for short
# sequences. Each callable receives (L, M, training) where L is the sequence
# length and M the number of random features, and returns True to fall back.
SHORT_SEQUENCE_BEHAVIOR_CALLABLES = {
    'use_softmax_eval_only': lambda seq_len, num_feats, training: (not training) and seq_len < 2.0 * num_feats,
    'use_softmax_eval_and_train': lambda seq_len, num_feats, training: seq_len < 2.0 * num_feats,
    'never_use_softmax': lambda seq_len, num_feats, training: False
}
class TFPerformerAttention(tf.keras.layers.Layer):
    def __init__(self, config: Optional[Union[dict, PerformerAttentionConfig]] = None, **kwargs):
        """Set up FAVOR+ (Performer) attention.

        :param config: dict or PerformerAttentionConfig carrying the
            hyperparameters (num_heads, d_model, kernel_type,
            attention_dropout, short_sequence_behavior, ...). All of its
            entries become attributes of this layer.
        :param kwargs: individual hyperparameters; these override any values
            from ``config``.
        :raises ValueError: if num_heads/d_model are missing or the
            short_sequence_behavior setting is invalid.
        """
        super().__init__()
        if config is not None:
            # config can either be a dictionary or a PerformerAttentionConfig object
            if not isinstance(config, dict):
                config = config.__dict__
            # Just copy over all the parameters
            self.__dict__.update(config)
        else:
            # Make sure we have all the default values filled in
            config = PerformerAttentionConfig(**kwargs)
            kwargs = config.__dict__
        # kwargs take precedence over the default values that might be stored in the config object
        self.__dict__.update(kwargs)
        if self.num_heads is None or self.d_model is None:
            raise ValueError("PerformerAttention: num_heads and d_model must be non-None")
        self.dropout = tf.keras.layers.Dropout(rate=self.attention_dropout)
        # State for periodic random-feature redrawing.
        self.calls_since_last_redraw = 0
        self.random_features = None
        # Resolve the softmax-fallback policy for short sequences; the relu
        # kernel has no softmax equivalent, so it may never fall back.
        behavior = self.short_sequence_behavior
        if not behavior:
            behavior = 'never_use_softmax' if self.kernel_type == 'relu' else 'use_softmax_eval_only'
            self.should_fallback_to_softmax = SHORT_SEQUENCE_BEHAVIOR_CALLABLES[behavior]
        elif self.kernel_type == 'relu' and behavior != 'never_use_softmax':
            raise ValueError(f"PerformerAttention: short_sequence_behavior = {behavior} cannot be combined with the relu "
                             "kernel type")
        elif isinstance(behavior, str):
            self.should_fallback_to_softmax = SHORT_SEQUENCE_BEHAVIOR_CALLABLES[behavior]
        elif callable(behavior):
            self.should_fallback_to_softmax = behavior
        else:
            raise ValueError("PerformerAttention: short_sequence_behavior must be either str or Callable")
        self.kernel_fn = KERNEL_CALLABLES[self.kernel_type]
        # Head dimension must divide the model dimension evenly.
        assert self.d_model % self.num_heads == 0
        if self.use_qkv_linear_layers:
            self.q_lin = tf.keras.layers.Dense(units=self.d_model)
            self.k_lin = tf.keras.layers.Dense(units=self.d_model)
            self.v_lin = tf.keras.layers.Dense(units=self.d_model)
        self.out_lin = tf.keras.layers.Dense(units=self.d_model)
        self.pruned_heads = set()
def prune_heads(self, heads):
attention_head_size = self.d_model // self.num_heads
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, attention_head_size, self.pruned_heads)
# Prune linear layers
if self.use_qkv_linear_layers:
self.q_lin = prune_linear_layer(self.q_lin, index)
self.k_lin = prune_linear_layer(self.k_lin, index)
self.v_lin = prune_linear_layer(self.v_lin, index)
self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
# Update hyper params
self.num_heads = self.num_heads - len(heads)
self.d_model = attention_head_size * self.num_heads
self.pruned_heads = self.pruned_heads.union(heads)
def redraw_features_now(self):
self._generate_feature_matrix()
if self.training and self.redraw_verbose:
logging.info("PerformerAttention: Just redrew random features.")
self.calls_since_last_redraw = 0
def call(self, query, key, value, mask=None, head_mask=None, output_attentions=False):
"""
Parameters:
query: torch.tensor(bs, seq_length, dim)
key: torch.tensor(bs, seq_length, dim)
value: torch.tensor(bs, seq_length, dim)
mask: torch.tensor(bs, seq_length)
Returns:
weights: tf.tensor(bs, num_heads, seq_length, seq_length) Attention weights context: tf.tensor(bs,
seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
"""
bs, q_length, dim = query.size()
k_length = key.size(1)
# assert dim == self.d_model, 'Dimensions do not match: %s input vs %s configured' % (dim, self.d_model)
# assert key.size() == value.size()
dim_per_head = self.d_model // self.num_heads
mask_reshp = (bs, 1, 1, k_length)
def shape(x):
""" separate heads """
new_shape = tf.concat((x.shape[:-1], tf.constant([self.num_heads, dim_per_head])), axis=0)
return tf.transpose(tf.reshape(x, new_shape), perm=[0, 2, 1, 3])
if self.use_qkv_linear_layers:
q = self.q_lin(query)
k = self.k_lin(key)
v = self.v_lin(value)
else:
q, k, v = query, key, value
# (bs, num_heads, q_length, dim_per_head)
q, k, v = (shape(x) for x in (q, k, v))
# If the sequence length is short enough that FAVOR+ would use considerably more time and/or memory than just
# using softmax attention, use softmax. This works because FAVOR+ is an unbiased estimator of softmax attention.
m = round(dim_per_head * np.log(dim_per_head)) # m is the number of random features
if self.should_fallback_to_softmax(q_length, m, self.training):
scores = q @ tf.linalg.matrix_transpose(k) / (dim ** 0.5)
if mask is not None:
mask = tf.reshape((mask == 0), mask_reshp) # .expand_as(scores) # (bs, num_heads, q_length, k_length)
scores -= 1e9 * tf.cast(mask, q.dtype) # (bs, num_heads, q_length, k_length)
attn_map = tf.nn.softmax(scores, dim=-1)
attn_map = self.dropout(attn_map) # (bs, num_heads, q_length, k_length)
return self._finalize_attention_output(attn_map @ v, head_mask, attn_map)
# When we're using FAVOR+ we can't output the attention matrix
if output_attentions:
raise ValueError("TFPerformerAttention: Can't output attention maps when using FAVOR+ linear attention.")
self._redraw_features_if_needed()
# Get the transformed values of Q and K
q_prime, k_prime = self.get_projected_queries_and_keys(q, k)
return self.compute_attention_with_projected_queries_and_keys(q_prime, k_prime, v, mask, head_mask)
# Turns Q into Q', K into K'
def get_projected_queries_and_keys(self, q, k):
# Broadcast the feature matrix across the batch dimension
# new_shape = list(q.shape)
# new_shape[-2] = self.random_features.shape[-2]
W_t = tf.linalg.matrix_transpose(self.random_features) # .expand(new_shape)
# Instead of dividing the product QK^T by sqrt(d), we divide Q and K by the 4th root of d.
q = q / (self.d_model ** 0.25)
k = k / (self.d_model ** 0.25)
projected_q = q @ W_t
projected_k = k @ W_t
# Special logic for kernels that attempt to approximate softmax
if self.kernel_type in ('cosh', 'exp'):
# The h(x) function is defined in Lemma 1 in Choromanski et al. pg. 4 as exp(-||x||**2 / 2). For numerical
# stability we leverage the fact that exp(x)*exp(y) = exp(x + y) here and delay computing the exp().
h_of_q = -tf.reduce_sum(q ** 2, dim=-1, keepdim=True) / 2
h_of_k = -tf.reduce_sum(k ** 2, dim=-1, keepdim=True) / 2
# Compute the numerical stabilizer that we subtract from the input to exp(). For some reason the original
# Jax implementation uses different types of stabilizers for queries vs. keys, and we follow that here.
q_stabilizer = tf.math.reduce_max(h_of_q, axis=-1, keepdims=True)
# This is just a scalar
k_stabilizer = tf.math.reduce_max(h_of_k)
q_kernel_output = self.kernel_fn(projected_q - q_stabilizer, h_of_q)
k_kernel_output = self.kernel_fn(projected_k - k_stabilizer, h_of_k)
# By multiplying by 1/sqrt(m), we ensure the final matrix product will contain a factor of 1/m. This means
# each row of Q'K'^T can be interpreted as an average over the exp(omega^T * q) * exp(omega^T * k) terms.
normalizing_constant = (q_kernel_output.shape[-1] ** -0.5)
q_prime = normalizing_constant * (q_kernel_output + self.kernel_epsilon)
k_prime = normalizing_constant * (k_kernel_output + self.kernel_epsilon)
return q_prime, k_prime
# Generalized attention (ReLU, ELU...)
else:
return (self.kernel_fn(x) + self.kernel_epsilon for x in (projected_q, projected_k))
def compute_attention_with_projected_queries_and_keys(self, q_prime, k_prime, v, mask = None, head_mask = None):
# Apply the padding mask to K'. Also applying it to Q' would be redundant.
if mask is not None:
k_prime *= tf.expand_dims(tf.expand_dims(mask, 1), -1)#.expand_as(k_prime)
k_prime_t = tf.linalg.matrix_transpose(k_prime)
output = q_prime @ (k_prime_t @ v)
# Ensure that the output vectors are convex combinations of input vectors; that is,
# the implied attention scores sum to 1
if self.normalize_output:
# Equivalent to multiplying K'^T by a ones vector
d = q_prime @ tf.expand_dims(tf.math.reduce_sum(k_prime), -1)
# Avoid dividing by very small numbers
d += 2 * self.normalization_stabilizer * (tf.abs(d) <= self.normalization_stabilizer)
output /= d
return self._finalize_attention_output(output, head_mask)
def _finalize_attention_output(self, context, head_mask=None, att_map_to_output=None):
def unshape(x):
""" group heads """
x = tf.transpose(context, perm=[0, 2, 1, 3]) # [...seq_len, num_heads, dim_per_head]
new_last_dim = tf.constant(x.shape[-2] * x.shape[-1]) # Multiply num_heads * dim_per_head
return tf.reshape(x, tf.concat((x.shape[:-2], new_last_dim), axis=0))
# Mask heads if we want to
if head_mask is not None:
context = context * head_mask
context = unshape(context) # (bs, q_length, dim)
context = self.out_lin(context) # (bs, q_length, dim)
if att_map_to_output:
return context, att_map_to_output
else:
return context,
def _generate_feature_matrix(self):
dim_per_head = self.d_model // self.num_heads
num_rows = round(dim_per_head * np.log(dim_per_head))
if not self.use_orthogonal_features:
return tf.random.normal((num_rows, dim_per_head))
def get_square_block(size):
with tf.device('/CPU:0'):
unstructured_block = tf.random.normal((size, size))
orthog, r = tf.linalg.qr(unstructured_block)
return orthog.t()
num_full_blocks = num_rows // dim_per_head
block_list = [get_square_block(dim_per_head) for _ in range(num_full_blocks)]
remaining_rows = num_rows - num_full_blocks * dim_per_head
if remaining_rows > 0:
q = get_square_block(dim_per_head)
block_list.append(q[:remaining_rows])
final_matrix = tf.concat(block_list)
# This option yields SMREG
if self.regularize_feature_norms:
final_matrix *= dim_per_head ** 0.5
else:
# Hack to make the matrix columns have the norm we would expect them to have if they were sampled straight
# from a Gaussian, instead of being all norm 1 since they went through QR decomposition
multiplier = tf.random.normal((num_rows, dim_per_head)).norm(dim = 1)
final_matrix = tf.linalg.diag(multiplier) @ final_matrix
self.random_features = final_matrix
def _redraw_features_if_needed(self):
# We haven't created the projection matrix yet, let's create it
if self.random_features is None:
self._generate_feature_matrix()
elif self.feature_redraw_interval is not None:
if self.redraw_stochastically:
# Flip a (very biased) coin
if np.random.default_rng().binomial(1, 1. / self.feature_redraw_interval):
self.redraw_features_now()
# It's time to redraw the projection matrix
elif self.calls_since_last_redraw >= self.feature_redraw_interval:
self.redraw_features_now()
# Keep track of how many forward passes we do before we redraw again
else:
self.calls_since_last_redraw += 1
| 45.654485 | 122 | 0.622398 | from typing import Optional, Union
import logging
import numpy as np
import tensorflow as tf
from .configuration_performer_attention import PerformerAttentionConfig
from .modeling_utils import (
find_pruneable_heads_and_indices,
prune_linear_layer
)
KERNEL_CALLABLES = {
'cosh': lambda x, h: tf.concat((tf.exp(h + x), tf.exp(h - x)), dim=-1),
'exp': lambda x, h: tf.exp(h + x),
'elu': lambda x: tf.nn.elu(x) + 1,
'relu': tf.nn.relu
}
SHORT_SEQUENCE_BEHAVIOR_CALLABLES = {
'use_softmax_eval_only': lambda L, M, training: False if training else L < 2.0 * M,
'use_softmax_eval_and_train': lambda L, M, training: L < 2.0 * M,
'never_use_softmax': lambda L, M, training: False
}
class TFPerformerAttention(tf.keras.layers.Layer):
def __init__(self, config: Optional[Union[dict, PerformerAttentionConfig]] = None, **kwargs):
super().__init__()
if config is not None:
if not isinstance(config, dict):
config = config.__dict__
self.__dict__.update(config)
else:
config = PerformerAttentionConfig(**kwargs)
kwargs = config.__dict__
self.__dict__.update(kwargs)
if self.num_heads is None or self.d_model is None:
raise ValueError("PerformerAttention: num_heads and d_model must be non-None")
self.dropout = tf.keras.layers.Dropout(rate=self.attention_dropout)
self.calls_since_last_redraw = 0
self.random_features = None
behavior = self.short_sequence_behavior
if not behavior:
behavior = 'never_use_softmax' if self.kernel_type == 'relu' else 'use_softmax_eval_only'
self.should_fallback_to_softmax = SHORT_SEQUENCE_BEHAVIOR_CALLABLES[behavior]
elif self.kernel_type == 'relu' and behavior != 'never_use_softmax':
raise ValueError(f"PerformerAttention: short_sequence_behavior = {behavior} cannot be combined with the relu "
"kernel type")
elif isinstance(behavior, str):
self.should_fallback_to_softmax = SHORT_SEQUENCE_BEHAVIOR_CALLABLES[behavior]
elif callable(behavior):
self.should_fallback_to_softmax = behavior
else:
raise ValueError("PerformerAttention: short_sequence_behavior must be either str or Callable")
self.kernel_fn = KERNEL_CALLABLES[self.kernel_type]
assert self.d_model % self.num_heads == 0
if self.use_qkv_linear_layers:
self.q_lin = tf.keras.layers.Dense(units=self.d_model)
self.k_lin = tf.keras.layers.Dense(units=self.d_model)
self.v_lin = tf.keras.layers.Dense(units=self.d_model)
self.out_lin = tf.keras.layers.Dense(units=self.d_model)
self.pruned_heads = set()
def prune_heads(self, heads):
attention_head_size = self.d_model // self.num_heads
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, attention_head_size, self.pruned_heads)
if self.use_qkv_linear_layers:
self.q_lin = prune_linear_layer(self.q_lin, index)
self.k_lin = prune_linear_layer(self.k_lin, index)
self.v_lin = prune_linear_layer(self.v_lin, index)
self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
self.num_heads = self.num_heads - len(heads)
self.d_model = attention_head_size * self.num_heads
self.pruned_heads = self.pruned_heads.union(heads)
def redraw_features_now(self):
self._generate_feature_matrix()
if self.training and self.redraw_verbose:
logging.info("PerformerAttention: Just redrew random features.")
self.calls_since_last_redraw = 0
def call(self, query, key, value, mask=None, head_mask=None, output_attentions=False):
bs, q_length, dim = query.size()
k_length = key.size(1)
dim_per_head = self.d_model // self.num_heads
mask_reshp = (bs, 1, 1, k_length)
def shape(x):
new_shape = tf.concat((x.shape[:-1], tf.constant([self.num_heads, dim_per_head])), axis=0)
return tf.transpose(tf.reshape(x, new_shape), perm=[0, 2, 1, 3])
if self.use_qkv_linear_layers:
q = self.q_lin(query)
k = self.k_lin(key)
v = self.v_lin(value)
else:
q, k, v = query, key, value
q, k, v = (shape(x) for x in (q, k, v))
m = round(dim_per_head * np.log(dim_per_head))
if self.should_fallback_to_softmax(q_length, m, self.training):
scores = q @ tf.linalg.matrix_transpose(k) / (dim ** 0.5)
if mask is not None:
mask = tf.reshape((mask == 0), mask_reshp) ast(mask, q.dtype)
attn_map = tf.nn.softmax(scores, dim=-1)
attn_map = self.dropout(attn_map)
return self._finalize_attention_output(attn_map @ v, head_mask, attn_map)
if output_attentions:
raise ValueError("TFPerformerAttention: Can't output attention maps when using FAVOR+ linear attention.")
self._redraw_features_if_needed()
# Get the transformed values of Q and K
q_prime, k_prime = self.get_projected_queries_and_keys(q, k)
return self.compute_attention_with_projected_queries_and_keys(q_prime, k_prime, v, mask, head_mask)
# Turns Q into Q', K into K'
def get_projected_queries_and_keys(self, q, k):
# Broadcast the feature matrix across the batch dimension
# new_shape = list(q.shape)
# new_shape[-2] = self.random_features.shape[-2]
W_t = tf.linalg.matrix_transpose(self.random_features) # .expand(new_shape)
# Instead of dividing the product QK^T by sqrt(d), we divide Q and K by the 4th root of d.
q = q / (self.d_model ** 0.25)
k = k / (self.d_model ** 0.25)
projected_q = q @ W_t
projected_k = k @ W_t
# Special logic for kernels that attempt to approximate softmax
if self.kernel_type in ('cosh', 'exp'):
# The h(x) function is defined in Lemma 1 in Choromanski et al. pg. 4 as exp(-||x||**2 / 2). For numerical
# stability we leverage the fact that exp(x)*exp(y) = exp(x + y) here and delay computing the exp().
h_of_q = -tf.reduce_sum(q ** 2, dim=-1, keepdim=True) / 2
h_of_k = -tf.reduce_sum(k ** 2, dim=-1, keepdim=True) / 2
# Compute the numerical stabilizer that we subtract from the input to exp(). For some reason the original
# Jax implementation uses different types of stabilizers for queries vs. keys, and we follow that here.
q_stabilizer = tf.math.reduce_max(h_of_q, axis=-1, keepdims=True)
# This is just a scalar
k_stabilizer = tf.math.reduce_max(h_of_k)
q_kernel_output = self.kernel_fn(projected_q - q_stabilizer, h_of_q)
k_kernel_output = self.kernel_fn(projected_k - k_stabilizer, h_of_k)
# By multiplying by 1/sqrt(m), we ensure the final matrix product will contain a factor of 1/m. This means
# each row of Q'K'^T can be interpreted as an average over the exp(omega^T * q) * exp(omega^T * k) terms.
normalizing_constant = (q_kernel_output.shape[-1] ** -0.5)
q_prime = normalizing_constant * (q_kernel_output + self.kernel_epsilon)
k_prime = normalizing_constant * (k_kernel_output + self.kernel_epsilon)
return q_prime, k_prime
# Generalized attention (ReLU, ELU...)
else:
return (self.kernel_fn(x) + self.kernel_epsilon for x in (projected_q, projected_k))
def compute_attention_with_projected_queries_and_keys(self, q_prime, k_prime, v, mask = None, head_mask = None):
# Apply the padding mask to K'. Also applying it to Q' would be redundant.
if mask is not None:
k_prime *= tf.expand_dims(tf.expand_dims(mask, 1), -1)#.expand_as(k_prime)
k_prime_t = tf.linalg.matrix_transpose(k_prime)
output = q_prime @ (k_prime_t @ v)
# Ensure that the output vectors are convex combinations of input vectors; that is,
# the implied attention scores sum to 1
if self.normalize_output:
# Equivalent to multiplying K'^T by a ones vector
d = q_prime @ tf.expand_dims(tf.math.reduce_sum(k_prime), -1)
d += 2 * self.normalization_stabilizer * (tf.abs(d) <= self.normalization_stabilizer)
output /= d
return self._finalize_attention_output(output, head_mask)
def _finalize_attention_output(self, context, head_mask=None, att_map_to_output=None):
def unshape(x):
x = tf.transpose(context, perm=[0, 2, 1, 3])
new_last_dim = tf.constant(x.shape[-2] * x.shape[-1])
return tf.reshape(x, tf.concat((x.shape[:-2], new_last_dim), axis=0))
if head_mask is not None:
context = context * head_mask
context = unshape(context)
context = self.out_lin(context)
if att_map_to_output:
return context, att_map_to_output
else:
return context,
def _generate_feature_matrix(self):
dim_per_head = self.d_model // self.num_heads
num_rows = round(dim_per_head * np.log(dim_per_head))
if not self.use_orthogonal_features:
return tf.random.normal((num_rows, dim_per_head))
def get_square_block(size):
with tf.device('/CPU:0'):
unstructured_block = tf.random.normal((size, size))
orthog, r = tf.linalg.qr(unstructured_block)
return orthog.t()
num_full_blocks = num_rows // dim_per_head
block_list = [get_square_block(dim_per_head) for _ in range(num_full_blocks)]
remaining_rows = num_rows - num_full_blocks * dim_per_head
if remaining_rows > 0:
q = get_square_block(dim_per_head)
block_list.append(q[:remaining_rows])
final_matrix = tf.concat(block_list)
if self.regularize_feature_norms:
final_matrix *= dim_per_head ** 0.5
else:
multiplier = tf.random.normal((num_rows, dim_per_head)).norm(dim = 1)
final_matrix = tf.linalg.diag(multiplier) @ final_matrix
self.random_features = final_matrix
def _redraw_features_if_needed(self):
if self.random_features is None:
self._generate_feature_matrix()
elif self.feature_redraw_interval is not None:
if self.redraw_stochastically:
if np.random.default_rng().binomial(1, 1. / self.feature_redraw_interval):
self.redraw_features_now()
elif self.calls_since_last_redraw >= self.feature_redraw_interval:
self.redraw_features_now()
# Keep track of how many forward passes we do before we redraw again
else:
self.calls_since_last_redraw += 1
| true | true |
f73e4e75ba03100756936221c31bb5303f3d9e4f | 417 | py | Python | posts/migrations/0002_auto_20200312_1113.py | Duskhorizon/discoplaytogether | e74a11b0f65d14db6f15d1bb0536411dd546eda6 | [
"MIT"
] | null | null | null | posts/migrations/0002_auto_20200312_1113.py | Duskhorizon/discoplaytogether | e74a11b0f65d14db6f15d1bb0536411dd546eda6 | [
"MIT"
] | null | null | null | posts/migrations/0002_auto_20200312_1113.py | Duskhorizon/discoplaytogether | e74a11b0f65d14db6f15d1bb0536411dd546eda6 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-03-12 11:13
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the channel and server fields from the event model."""

    dependencies = [('posts', '0001_initial')]

    operations = [
        migrations.RemoveField(model_name='event', name='channel'),
        migrations.RemoveField(model_name='event', name='server'),
    ]
| 18.954545 | 47 | 0.544365 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('posts', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='event',
name='channel',
),
migrations.RemoveField(
model_name='event',
name='server',
),
]
| true | true |
f73e4e9197fbb0567c431ad804b5e4891508c915 | 2,697 | py | Python | tvm/dmlc-core/tracker/dmlc_tracker/launcher.py | hj424/heterocl | e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b | [
"Apache-2.0"
] | 236 | 2019-05-19T01:48:11.000Z | 2022-03-31T09:03:54.000Z | tvm/dmlc-core/tracker/dmlc_tracker/launcher.py | hj424/heterocl | e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b | [
"Apache-2.0"
] | 248 | 2019-05-17T19:18:36.000Z | 2022-03-30T21:25:47.000Z | tvm/dmlc-core/tracker/dmlc_tracker/launcher.py | hj424/heterocl | e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b | [
"Apache-2.0"
] | 85 | 2019-05-17T20:09:27.000Z | 2022-02-28T20:19:00.000Z | #!/usr/bin/env python
# pylint: disable=invalid-name
"""The container launcher script that launches DMLC with the right env variable."""
import glob
import sys
import os
import subprocess
def unzip_archives(ar_list, env):
    """Extract each archive in *ar_list* into the current directory.

    Uses the external ``unzip``/``tar`` commands (run with *env*) so that
    file permissions inside the archives are preserved. Paths that do not
    exist are skipped silently.
    """
    for fname in ar_list:
        if not os.path.exists(fname):
            continue
        if fname.endswith('.zip'):
            subprocess.call(args=['unzip', fname], env=env)
        elif '.tar' in fname:  # matches .tar, .tar.gz, .tar.bz2, ...
            subprocess.call(args=['tar', '-xf', fname], env=env)
def main():
    """Main module of the launcher.

    Prepares the Hadoop/Java library path, class path and HDFS options in a
    copy of the environment, unpacks any job archives, then runs the user
    command (``sys.argv[1:]``) and exits with its return code.
    """
    if len(sys.argv) < 2:
        print('Usage: launcher.py your command')
        sys.exit(0)

    hadoop_home = os.getenv('HADOOP_HOME')
    hdfs_home = os.getenv('HADOOP_HDFS_HOME')
    java_home = os.getenv('JAVA_HOME')
    hadoop_home = os.getenv('HADOOP_PREFIX') if hadoop_home is None else hadoop_home

    cluster = os.getenv('DMLC_JOB_CLUSTER')
    assert cluster is not None, 'need to have DMLC_JOB_CLUSTER'

    env = os.environ.copy()
    library_path = ['./']
    class_path = []

    if cluster == 'yarn':
        assert hadoop_home is not None, 'need to set HADOOP_HOME'
        assert hdfs_home is not None, 'need to set HADOOP_HDFS_HOME'
        assert java_home is not None, 'need to set JAVA_HOME'

    if cluster == 'sge':
        # On SGE the first DMLC_NUM_WORKER tasks become workers, the rest servers
        num_worker = int(env['DMLC_NUM_WORKER'])
        task_id = int(env['DMLC_TASK_ID'])
        if task_id < num_worker:
            env['DMLC_ROLE'] = 'worker'
        else:
            env['DMLC_ROLE'] = 'server'

    if hadoop_home:
        library_path.append('%s/lib/native' % hdfs_home)
        library_path.append('%s/lib' % hdfs_home)

        (classpath, _) = subprocess.Popen('%s/bin/hadoop classpath' % hadoop_home,
                                          stdout=subprocess.PIPE, shell=True,
                                          env=os.environ).communicate()
        # communicate() returns bytes on Python 3: decode and strip the trailing
        # newline before splitting, otherwise str.split fails and the last glob
        # pattern would end in '\n' and match nothing.
        for f in classpath.decode().strip().split(':'):
            class_path += glob.glob(f)

    if java_home:
        library_path.append('%s/jre/lib/amd64/server' % java_home)

    env['CLASSPATH'] = '${CLASSPATH}:' + (':'.join(class_path))

    # setup hdfs options
    if 'DMLC_HDFS_OPTS' in env:
        env['LIBHDFS_OPTS'] = env['DMLC_HDFS_OPTS']
    elif 'LIBHDFS_OPTS' not in env:
        # NOTE(review): JVM options normally use a single dash (-Xmx128m); the
        # double dash is kept as-is from upstream — confirm before changing.
        env['LIBHDFS_OPTS'] = '--Xmx128m'

    LD_LIBRARY_PATH = env['LD_LIBRARY_PATH'] if 'LD_LIBRARY_PATH' in env else ''
    env['LD_LIBRARY_PATH'] = LD_LIBRARY_PATH + ':' + ':'.join(library_path)

    # unzip the archives.
    if 'DMLC_JOB_ARCHIVES' in env:
        unzip_archives(env['DMLC_JOB_ARCHIVES'].split(':'), env)

    ret = subprocess.call(args=sys.argv[1:], env=env)
    sys.exit(ret)


if __name__ == '__main__':
    main()
| 32.890244 | 84 | 0.609937 |
import glob
import sys
import os
import subprocess
def unzip_archives(ar_list, env):
for fname in ar_list:
if not os.path.exists(fname):
continue
if fname.endswith('.zip'):
subprocess.call(args=['unzip', fname], env=env)
elif fname.find('.tar') != -1:
subprocess.call(args=['tar', '-xf', fname], env=env)
def main():
if len(sys.argv) < 2:
print('Usage: launcher.py your command')
sys.exit(0)
hadoop_home = os.getenv('HADOOP_HOME')
hdfs_home = os.getenv('HADOOP_HDFS_HOME')
java_home = os.getenv('JAVA_HOME')
hadoop_home = os.getenv('HADOOP_PREFIX') if hadoop_home is None else hadoop_home
cluster = os.getenv('DMLC_JOB_CLUSTER')
assert cluster is not None, 'need to have DMLC_JOB_CLUSTER'
env = os.environ.copy()
library_path = ['./']
class_path = []
if cluster == 'yarn':
assert hadoop_home is not None, 'need to set HADOOP_HOME'
assert hdfs_home is not None, 'need to set HADOOP_HDFS_HOME'
assert java_home is not None, 'need to set JAVA_HOME'
if cluster == 'sge':
num_worker = int(env['DMLC_NUM_WORKER'])
task_id = int(env['DMLC_TASK_ID'])
if task_id < num_worker:
env['DMLC_ROLE'] = 'worker'
else:
env['DMLC_ROLE'] = 'server'
if hadoop_home:
library_path.append('%s/lib/native' % hdfs_home)
library_path.append('%s/lib' % hdfs_home)
(classpath, _) = subprocess.Popen('%s/bin/hadoop classpath' % hadoop_home,
stdout=subprocess.PIPE, shell=True,
env=os.environ).communicate()
for f in classpath.split(':'):
class_path += glob.glob(f)
if java_home:
library_path.append('%s/jre/lib/amd64/server' % java_home)
env['CLASSPATH'] = '${CLASSPATH}:' + (':'.join(class_path))
if 'DMLC_HDFS_OPTS' in env:
env['LIBHDFS_OPTS'] = env['DMLC_HDFS_OPTS']
elif 'LIBHDFS_OPTS' not in env:
env['LIBHDFS_OPTS'] = '--Xmx128m'
LD_LIBRARY_PATH = env['LD_LIBRARY_PATH'] if 'LD_LIBRARY_PATH' in env else ''
env['LD_LIBRARY_PATH'] = LD_LIBRARY_PATH + ':' + ':'.join(library_path)
if 'DMLC_JOB_ARCHIVES' in env:
unzip_archives(env['DMLC_JOB_ARCHIVES'].split(':'), env)
ret = subprocess.call(args=sys.argv[1:], env=env)
sys.exit(ret)
if __name__ == '__main__':
main()
| true | true |
f73e4eea10557ba50f2e6c997da112380550e134 | 5,277 | py | Python | output.py | Casper64/natural-deduction | 30c9f7640126102aa31aae70e0e28322159d766c | [
"MIT"
] | null | null | null | output.py | Casper64/natural-deduction | 30c9f7640126102aa31aae70e0e28322159d766c | [
"MIT"
] | null | null | null | output.py | Casper64/natural-deduction | 30c9f7640126102aa31aae70e0e28322159d766c | [
"MIT"
] | null | null | null | import argparse
from enum import Enum
from random import random
import re
import input
import debug
import math
import util
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from solve import Premise
def display_start() -> None:
    """Parse command-line options and load the statements to prove.

    ``--file PATH`` imports statements from PATH; otherwise they are read
    interactively. ``--debug`` enables the global debug flag.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--file", help="Parse from a file")
    # Fixed user-visible typo: "debuggin" -> "debugging"
    parser.add_argument("--debug", help="Enable debugging", action='store_true')
    args = parser.parse_args()

    if args.file:
        print(f"Importing statements from {args.file}...\n")
        input.read_from_file(args.file)
    else:
        input.read_from_input()

    if args.debug:
        debug.DEBUG = True
class StepType(Enum):
    """Kinds of proof steps in a natural deduction derivation.

    OA/CA are structural markers that open/close an assumption scope and
    produce no printed line of their own (see NaturalDeductionTree.close).
    """
    P=0 # Premise
    A=1 # Assumption
    OA=2 # Open Assumption
    CA=3 # Close Assumption
    EI=4 # Elimination implication
    II=5 # Introduction implication
    CT=6 # Contradiction aka Introduction negation
    IN=7 # Introduction negation
    EN=8 # Elimination negation
    IA=9 # Introduction and
    EA=10 # Elimination and
    RI=11 # Reiteration
class Step:
    """One line of a natural deduction proof: a premise plus its justification."""
    # TODO: implement rule numbers for the steps, hashmap maybe??

    def __init__(self, premise: 'Premise', type: StepType, assumptions: list['Premise']=None):
        self._premise = premise
        self._type = type
        self._assumptions = assumptions
        # Normalize bracket/quote noise in the printable form of the premise
        cleaned = str(premise)
        for old, new in (("[", "("), ("]", ")"), ("'", "")):
            cleaned = cleaned.replace(old, new)
        self.premise = cleaned

    def get(self):
        """Return the justification label for this step's type."""
        if self._type == StepType.RI:
            return f"[reit.,#{self._premise.id}]"
        # Open-ended labels ("[E→, ") are completed with a line number later,
        # in NaturalDeductionTree.close
        labels = {
            StepType.P: "[ass.]",
            StepType.A: "[ass.]",
            StepType.EI: "[E→, ",
            StepType.II: "[I→, ",
            StepType.CT: "[I¬, ",
            StepType.IN: "[I¬, ",
            StepType.EN: "[E¬, ",
            StepType.IA: "[I^, ",
            StepType.EA: "[E^, ",
        }
        return labels.get(self._type, self._type)

    def __repr__(self):
        return f"{self.premise} {self.get()}"
class NaturalDeductionTree:
    """Collects proof steps and pretty-prints a Fitch-style natural deduction proof."""

    def __init__(self, statement: str):
        self.steps: list[Step] = []
        # Chain both substitutions: the old code re-read the raw `statement` for
        # the second replace(), silently discarding the ":-" -> "⊢" rewrite.
        self.statement = statement.replace(":-", "⊢").replace("!", "¬")

    def add(self, step: Step):
        """Append a proof step to the derivation."""
        self.steps.append(step)

    def get_premise(self, id: int, r: list[(str, 'Premise')]) -> 'Premise':
        """Return the 0-based position in *r* of the premise with the given id.

        NOTE(review): despite the annotation this returns an index (or None if
        the id is absent); callers in close() use it as a line index.
        """
        for i, (_, a) in enumerate(r):
            if a.id == id:
                return i

    def close(self):
        """Render all accumulated steps and print the finished proof."""
        result: list[(str, 'Premise')] = []
        # Unique sentinel placed between formula and justification so the
        # justifications can be right-aligned in the second pass below.
        r = str(random())
        level = 1   # current assumption nesting depth (1 = outermost)
        line = 1    # printed line number
        max_lines = len(str(len(self.steps)))
        max_prepend = ' ' * max_lines

        for i, step in enumerate(self.steps):
            # OA/CA only change the nesting level; they produce no output line
            if step._type == StepType.OA:
                level += 1
                continue
            elif step._type == StepType.CA:
                level -= 1
                continue

            lines = f"{line}{' ' * (max_lines - len(str(line)))}"
            if isinstance(step.premise, str):
                premise = util.raw_to_str(step.premise)
            else:
                premise = util.cleanup(str(step.premise))
                premise = premise.replace("!", "¬")
            if step._type == StepType.CT:
                premise = "⊥"

            raw_step = step.get()
            # Resolve a "#<premise id>" back-reference to its printed line number.
            # Use a dedicated name (idx) so the outer loop index `i`, which the
            # bar-drawing checks below rely on, is not clobbered.
            if match := re.search(r"#(\d+)", raw_step):
                idx = self.get_premise(int(match.group(1)), result)
                raw_step = raw_step[:match.start()] + str(idx + 1) + raw_step[match.end():]

            string = f"{lines}{' │ ' * level}{premise}{r}_{raw_step}\n"

            # Open assumption so draw a line
            if step._type == StepType.A and i-1 >= 0 and self.steps[i-1]._type == StepType.OA:
                string += f"{max_prepend}{' │ ' * (level-1)} ├{'─'*len(premise)}\n"
            # If its the last premise draw a line
            elif step._type == StepType.P and i+1 != len(self.steps) and self.steps[i+1]._type != StepType.P:
                string += f"{max_prepend}{' │ ' * (level-1)} ├{'─'*(len(premise)+1)}\n"
            # ... including the case where the premise is the last step overall
            elif step._type == StepType.P and i+1 == len(self.steps):
                string += f"{max_prepend}{' │ ' * (level-1)} ├{'─'*(len(premise)+1)}\n"

            result.append((string, step._premise))
            line += 1

        # Second pass: right-align the justification labels.
        # default=0 keeps an empty proof from crashing max().
        max_len = max((len(x[0].split("\n")[0]) for x in result), default=0)
        p = self.statement + "\n\n"
        for string, premise in result:
            s = string.split("\n")[0]
            l = len(s)
            l2 = len(s.split("_")[1])
            replaceable = " " * (4 + max_len - l + l2)
            string = string.replace(r + "_", replaceable)
            p += string
        # TODO: better way of printing?
        print(p)
from enum import Enum
from random import random
import re
import input
import debug
import math
import util
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from solve import Premise
def display_start() -> None:
p = argparse.ArgumentParser()
p.add_argument("--file", help="Parse from a file")
p.add_argument("--debug", help="Enable debuggin", action='store_true')
args = p.parse_args()
if args.file:
print(f"Importing statements from {args.file}...\n")
input.read_from_file(args.file)
else:
input.read_from_input()
if args.debug:
debug.DEBUG = True
class StepType(Enum):
P=0
A=1
OA=2
CA=3
EI=4
II=5
CT=6
IN=7
EN=8
IA=9
EA=10
RI=11
class Step:
def __init__(self, premise: 'Premise', type: StepType, assumptions: list['Premise']=None):
self._premise = premise
premise = str(premise).replace("[", "(").replace("]", ")").replace("'", "")
self.premise = premise
self._type = type
self._assumptions = assumptions
def get(self):
if self._type == StepType.P:
return "[ass.]"
elif self._type == StepType.A:
return "[ass.]"
elif self._type == StepType.EI:
return "[E→, "
elif self._type == StepType.II:
return "[I→, "
elif self._type == StepType.CT:
return "[I¬, "
elif self._type == StepType.IN:
return "[I¬, "
elif self._type == StepType.EN:
return "[E¬, "
elif self._type == StepType.IA:
return "[I^, "
elif self._type == StepType.EA:
return "[E^, "
elif self._type == StepType.RI:
return f"[reit.,#{self._premise.id}]"
return self._type
def __repr__(self):
return f"{self.premise} {self.get()}"
class NaturalDeductionTree:
def __init__(self, statement: str):
self.steps: list[Step] = []
self.statement = statement.replace(":-", "⊢")
self.statement = statement.replace("!", "¬")
def add(self, step: Step):
self.steps.append(step)
def get_premise(self, id: int, r: list[(str, 'Premise')]) -> 'Premise':
for i, (_, a) in enumerate(r):
print(a.id, id)
if a.id == id:
return i
def close(self):
result: list[(str, 'Premise')] = []
r = str(random())
level = 1
line = 1
max_lines = len(str(len(self.steps)))
max_prepend = ' ' * max_lines
for i, step in enumerate(self.steps):
# Change current level if open assumption or close assumption
if step._type == StepType.OA:
level += 1
continue
elif step._type == StepType.CA:
level -= 1
continue
lines = f"{line}{' ' * (max_lines - len(str(line)))}"
if isinstance(step.premise, str):
premise = util.raw_to_str(step.premise)
else:
premise = util.cleanup(str(step.premise))
premise = premise.replace("!", "¬")
if step._type == StepType.CT:
premise = "⊥"
raw_step = step.get()
while match := re.search(r"#(\d+)", raw_step):
id = match.group(1)
i = self.get_premise(int(id), result)
raw_step = raw_step[:match.start()]+str(i+1)+raw_step[match.end():]
break
string = f"{lines}{' │ ' * level}{premise}{r}_{raw_step}\n"
# Open assumption so draw a line
if step._type == StepType.A and i-1 >= 0 and self.steps[i-1]._type == StepType.OA:
string += f"{max_prepend}{' │ ' * (level-1)} ├{'─'*len(premise)}\n"
# If its the last premise draw a line
elif step._type == StepType.P and i+1 != len(self.steps) and self.steps[i+1]._type != StepType.P:
string += f"{max_prepend}{' │ ' * (level-1)} ├{'─'*(len(premise)+1)}\n"
# If its the last premise draw a line, but added the case where the premise is the last premise
elif step._type == StepType.P and i+1 == len(self.steps):
string += f"{max_prepend}{' │ ' * (level-1)} ├{'─'*(len(premise)+1)}\n"
result.append((string, step._premise))
line += 1
max_len = max([len(x[0].split("\n")[0]) for x in result])
p = self.statement+"\n\n"
for string, premise in result:
s = string.split("\n")[0]
l = len(s)
l2 = len(s.split("_")[1])
# Align all action type thingies to the right on the same place
replaceable = " " * (4 + max_len - l + l2)
string = string.replace(r+"_", replaceable)
p += string
# TODO: better way of printing?
print(p) | true | true |
f73e4fd64975cbbb92418b729581cb7dbb63ada6 | 1,185 | py | Python | profiles/views/group_list_view.py | Sispheor/squest | f852fe7986521a9d8cd9fb5eb0b56aa15f22548c | [
"Apache-2.0"
] | 112 | 2021-04-21T08:52:55.000Z | 2022-03-01T15:09:19.000Z | profiles/views/group_list_view.py | Sispheor/squest | f852fe7986521a9d8cd9fb5eb0b56aa15f22548c | [
"Apache-2.0"
] | 216 | 2021-04-21T09:06:47.000Z | 2022-03-30T14:21:28.000Z | profiles/views/group_list_view.py | Sispheor/squest | f852fe7986521a9d8cd9fb5eb0b56aa15f22548c | [
"Apache-2.0"
] | 21 | 2021-04-20T13:53:54.000Z | 2022-03-30T21:43:04.000Z | from django_filters.views import FilterView
from django_tables2.views import SingleTableMixin
from django_tables2 import tables, TemplateColumn
from django.contrib.auth.models import Group
from guardian.mixins import LoginRequiredMixin
from profiles.filters.group_filter import GroupFilter
class GroupTable(tables.Table):
actions = TemplateColumn(template_name='custom_columns/group_actions.html', orderable=False)
users = TemplateColumn(template_name='custom_columns/group_users.html', orderable=False)
class Meta:
model = Group
attrs = {"id": "group_table", "class": "table squest-pagination-tables "}
fields = ("name", "users", "actions")
class GroupListView(LoginRequiredMixin, SingleTableMixin, FilterView):
table_pagination = {'per_page': 10}
table_class = GroupTable
model = Group
template_name = 'generics/list.html'
filterset_class = GroupFilter
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['title'] = "Groups"
context['html_button_path'] = "generics/buttons/add_group.html"
context['object_name'] = 'group'
return context
| 35.909091 | 96 | 0.733333 | from django_filters.views import FilterView
from django_tables2.views import SingleTableMixin
from django_tables2 import tables, TemplateColumn
from django.contrib.auth.models import Group
from guardian.mixins import LoginRequiredMixin
from profiles.filters.group_filter import GroupFilter
class GroupTable(tables.Table):
actions = TemplateColumn(template_name='custom_columns/group_actions.html', orderable=False)
users = TemplateColumn(template_name='custom_columns/group_users.html', orderable=False)
class Meta:
model = Group
attrs = {"id": "group_table", "class": "table squest-pagination-tables "}
fields = ("name", "users", "actions")
class GroupListView(LoginRequiredMixin, SingleTableMixin, FilterView):
table_pagination = {'per_page': 10}
table_class = GroupTable
model = Group
template_name = 'generics/list.html'
filterset_class = GroupFilter
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['title'] = "Groups"
context['html_button_path'] = "generics/buttons/add_group.html"
context['object_name'] = 'group'
return context
| true | true |
f73e505f6fa1318b67875300d1dce743aeef3d2b | 7,591 | py | Python | oneflow/python/onnx/optimizer/back_to_back_optimizer.py | xxg1413/oneflow | f2e3c85a25b8aecfb6c0c0af1737833b1a77e135 | [
"Apache-2.0"
] | 1 | 2020-12-04T03:06:16.000Z | 2020-12-04T03:06:16.000Z | oneflow/python/onnx/optimizer/back_to_back_optimizer.py | xxg1413/oneflow | f2e3c85a25b8aecfb6c0c0af1737833b1a77e135 | [
"Apache-2.0"
] | null | null | null | oneflow/python/onnx/optimizer/back_to_back_optimizer.py | xxg1413/oneflow | f2e3c85a25b8aecfb6c0c0af1737833b1a77e135 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
# Back_To_Back Optimizer.
# Collapse consecutive nodes into 1 node if possible.
from __future__ import unicode_literals
from oneflow.python.onnx.util import ONNX_DTYPE_NAMES # lgtm[py/unsafe-cyclic-import]
from .optimizer_base import GraphOptimizerBase # lgtm[py/unsafe-cyclic-import]
# pylint: disable=logging-not-lazy,unused-argument,missing-docstring,unused-variable,arguments-differ
_func_map = {}
def _register_func(op_type):
def _internal_fun(func):
_func_map[op_type] = func
return func
return _internal_fun
class BackToBackOptimizer(GraphOptimizerBase):
"""Remove back-to-back nodes e.g. 'Cast'
"""
def __init__(self): # pylint: disable=useless-super-delegation
super(BackToBackOptimizer, self).__init__()
def _Optimize(self, graph):
return self._ApplyOptimization(graph, self._OptimizeAtCurrentGraphLevel)
def _OptimizeAtCurrentGraphLevel(self, g):
for optype, handler in _func_map.items():
# candidate nodes for removal/optimization
nodes = [n for n in g.get_nodes() if n.type in optype]
# topological sort of candidates
# simplifying assumption for back-to-back-optimizer is
# the op_types have 1 input, 1 output, but multiple consumers
has_dependencies = set()
consumer_node_ids = {n.output[0]: [] for n in nodes}
for n in nodes:
if n.input[0] in consumer_node_ids:
consumer_node_ids[n.input[0]].extend([n])
has_dependencies.add(n.output[0])
# q = starting nodes with no dependencies
q = list(set(consumer_node_ids.keys()) - has_dependencies)
while q:
nodeid = q.pop(0)
node = g.get_node_by_output(nodeid, False)
consumer_nodes = consumer_node_ids[nodeid]
if len(consumer_nodes) > 0:
all_consumers = g.FindOutputConsumers(node.output[0])
if len(all_consumers) != len(consumer_nodes):
# if first node is used elsewhere, skip
continue
if set(node.output) & set(g.outputs):
# if this node is part of graph outputs, skip
continue
q2 = handler(g, node, consumer_nodes)
# add more nodes which can now be processed
q.extend(q2)
return g
@staticmethod
@_register_func("Cast")
def _OptimizeCast(g, node, consumer_nodes):
"""remove long chains of cast ops"""
q2 = []
type1 = node.get_attr("to").i
type1_name = ONNX_DTYPE_NAMES[type1] if type1 in ONNX_DTYPE_NAMES else ""
# if parent node is cast node, and same type, delete this one
pnode = node.inputs[0]
if pnode.type == "Cast":
type2 = pnode.get_attr("to").i
if type1 == type2:
for node2 in consumer_nodes:
node2.input[0] = node.input[0]
q2.append(node2.output[0])
g.RemoveNode(node.name)
return q2
# otherwise, check consumer cast nodes for a target type
# that contains more information than current type
can_reduce = True
for node2 in consumer_nodes:
type2 = node2.get_attr("to").i
type2_name = ONNX_DTYPE_NAMES[type2] if type2 in ONNX_DTYPE_NAMES else ""
if "float" in type1_name or type1_name == "double":
# high information type. ok to eliminate
pass
elif "int" in type1_name:
# int* and uint* are mix of high and low information.
# for safety, keep the current node, unless type2 is bool,
# in which case it's ok to remove node
if type1 != type2 and type2_name != "bool":
can_reduce = False
elif type1_name == "bool":
# bool is low information, so don't eliminate
if type1 != type2:
can_reduce = False
elif type1_name == "string":
# can always remove string
pass
else:
# some odd type, keep node
can_reduce = False
q2.append(node2.output[0])
if can_reduce:
for node2 in consumer_nodes:
node2.input[0] = node.input[0]
g.RemoveNode(node.name)
return q2
@staticmethod
@_register_func("Transpose")
def _OptimizeTranspose(g, node, consumer_nodes):
"""remove long chains of transpose ops"""
t1 = list(node.get_attr("perm").ints)
q2 = []
for node2 in consumer_nodes:
node2.input[0] = node.input[0]
t2 = list(node2.get_attr("perm").ints)
new_perm = [t1[i] for i in t2]
# check if node2 can be removed. otherwise only update
if new_perm == list(range(len(t2))):
# both nodes can be deleted
shape = g.get_shape(node2.output[0])
dtype = g.get_dtype(node2.output[0])
node2_consumers = g.FindOutputConsumers(node2.output[0])
g.ReplaceAllInputs(node2_consumers, node2.output[0], node.input[0])
g.RemoveNode(node2.name)
if set(node2.output) & set(g.outputs):
g.MakeNode(
"Identity",
[node.input[0]],
outputs=node2.output,
shapes=[shape],
dtypes=[dtype],
)
else:
node2.set_attr("perm", [t1[i] for i in t2])
q2.append(node2.output[0])
g.RemoveNode(node.name)
return q2
@staticmethod
@_register_func(("Squeeze", "Unsqueeze"))
def _OptimizeSqueezeUnsqueeze(g, node, consumer_nodes):
"""remove pairs of squeeze-unsqueeze nodes"""
if node.type != "Squeeze" or len(consumer_nodes) != 1:
# no need to return any value, since not removing long chain of nodes
return []
node2 = consumer_nodes[0]
if node2.type != "Unsqueeze":
return []
axis1 = node.get_attr("axes").ints
axis2 = node2.get_attr("axes").ints
# if squeeze followed by unsqueeze is on diff axes, skip
if axis1 != axis2:
return []
# if unsqueeze output is graph output, skip
if set(node2.output) & set(g.outputs):
return []
node2_consumers = g.FindOutputConsumers(node2.output[0])
g.ReplaceAllInputs(node2_consumers, node2.output[0], node.input[0])
g.RemoveNode(node.name)
g.RemoveNode(node2.name)
return []
| 38.145729 | 101 | 0.580819 |
from __future__ import unicode_literals
from oneflow.python.onnx.util import ONNX_DTYPE_NAMES
from .optimizer_base import GraphOptimizerBase
_func_map = {}
def _register_func(op_type):
def _internal_fun(func):
_func_map[op_type] = func
return func
return _internal_fun
class BackToBackOptimizer(GraphOptimizerBase):
def __init__(self):
super(BackToBackOptimizer, self).__init__()
def _Optimize(self, graph):
return self._ApplyOptimization(graph, self._OptimizeAtCurrentGraphLevel)
def _OptimizeAtCurrentGraphLevel(self, g):
for optype, handler in _func_map.items():
nodes = [n for n in g.get_nodes() if n.type in optype]
has_dependencies = set()
consumer_node_ids = {n.output[0]: [] for n in nodes}
for n in nodes:
if n.input[0] in consumer_node_ids:
consumer_node_ids[n.input[0]].extend([n])
has_dependencies.add(n.output[0])
q = list(set(consumer_node_ids.keys()) - has_dependencies)
while q:
nodeid = q.pop(0)
node = g.get_node_by_output(nodeid, False)
consumer_nodes = consumer_node_ids[nodeid]
if len(consumer_nodes) > 0:
all_consumers = g.FindOutputConsumers(node.output[0])
if len(all_consumers) != len(consumer_nodes):
continue
if set(node.output) & set(g.outputs):
continue
q2 = handler(g, node, consumer_nodes)
q.extend(q2)
return g
@staticmethod
@_register_func("Cast")
def _OptimizeCast(g, node, consumer_nodes):
q2 = []
type1 = node.get_attr("to").i
type1_name = ONNX_DTYPE_NAMES[type1] if type1 in ONNX_DTYPE_NAMES else ""
pnode = node.inputs[0]
if pnode.type == "Cast":
type2 = pnode.get_attr("to").i
if type1 == type2:
for node2 in consumer_nodes:
node2.input[0] = node.input[0]
q2.append(node2.output[0])
g.RemoveNode(node.name)
return q2
can_reduce = True
for node2 in consumer_nodes:
type2 = node2.get_attr("to").i
type2_name = ONNX_DTYPE_NAMES[type2] if type2 in ONNX_DTYPE_NAMES else ""
if "float" in type1_name or type1_name == "double":
pass
elif "int" in type1_name:
if type1 != type2 and type2_name != "bool":
can_reduce = False
elif type1_name == "bool":
# bool is low information, so don't eliminate
if type1 != type2:
can_reduce = False
elif type1_name == "string":
pass
else:
can_reduce = False
q2.append(node2.output[0])
if can_reduce:
for node2 in consumer_nodes:
node2.input[0] = node.input[0]
g.RemoveNode(node.name)
return q2
@staticmethod
@_register_func("Transpose")
def _OptimizeTranspose(g, node, consumer_nodes):
t1 = list(node.get_attr("perm").ints)
q2 = []
for node2 in consumer_nodes:
node2.input[0] = node.input[0]
t2 = list(node2.get_attr("perm").ints)
new_perm = [t1[i] for i in t2]
if new_perm == list(range(len(t2))):
shape = g.get_shape(node2.output[0])
dtype = g.get_dtype(node2.output[0])
node2_consumers = g.FindOutputConsumers(node2.output[0])
g.ReplaceAllInputs(node2_consumers, node2.output[0], node.input[0])
g.RemoveNode(node2.name)
if set(node2.output) & set(g.outputs):
g.MakeNode(
"Identity",
[node.input[0]],
outputs=node2.output,
shapes=[shape],
dtypes=[dtype],
)
else:
node2.set_attr("perm", [t1[i] for i in t2])
q2.append(node2.output[0])
g.RemoveNode(node.name)
return q2
@staticmethod
@_register_func(("Squeeze", "Unsqueeze"))
def _OptimizeSqueezeUnsqueeze(g, node, consumer_nodes):
if node.type != "Squeeze" or len(consumer_nodes) != 1:
return []
node2 = consumer_nodes[0]
if node2.type != "Unsqueeze":
return []
axis1 = node.get_attr("axes").ints
axis2 = node2.get_attr("axes").ints
if axis1 != axis2:
return []
if set(node2.output) & set(g.outputs):
return []
node2_consumers = g.FindOutputConsumers(node2.output[0])
g.ReplaceAllInputs(node2_consumers, node2.output[0], node.input[0])
g.RemoveNode(node.name)
g.RemoveNode(node2.name)
return []
| true | true |
f73e5203812e40469064ff1483357558448878d9 | 1,781 | py | Python | questions/coin-change/Solution.py | marcus-aurelianus/leetcode-solutions | 8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6 | [
"MIT"
] | 141 | 2017-12-12T21:45:53.000Z | 2022-03-25T07:03:39.000Z | questions/coin-change/Solution.py | marcus-aurelianus/leetcode-solutions | 8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6 | [
"MIT"
] | 32 | 2015-10-05T14:09:52.000Z | 2021-05-30T10:28:41.000Z | questions/coin-change/Solution.py | marcus-aurelianus/leetcode-solutions | 8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6 | [
"MIT"
] | 56 | 2015-09-30T05:23:28.000Z | 2022-03-08T07:57:11.000Z | """
You are given coins of different denominations and a total amount of money amount. Write a function to compute the fewest number of coins that you need to make up that amount. If that amount of money cannot be made up by any combination of the coins, return -1.
You may assume that you have an infinite number of each kind of coin.
Example 1:
Input: coins = [1,2,5], amount = 11
Output: 3
Explanation: 11 = 5 + 5 + 1
Example 2:
Input: coins = [2], amount = 3
Output: -1
Example 3:
Input: coins = [1], amount = 0
Output: 0
Example 4:
Input: coins = [1], amount = 1
Output: 1
Example 5:
Input: coins = [1], amount = 2
Output: 2
Constraints:
1 <= coins.length <= 12
1 <= coins[i] <= 231 - 1
0 <= amount <= 104
"""
import collections
class Solution:
def coinChange(self, coins: List[int], amount: int) -> int:
### method 1: BFS, model it as a graph problem
# queue = collections.deque()
# queue.append((0, 0))
# visited = set()
# while queue:
# curr, step = queue.popleft()
# if curr == amount:
# return step
# if curr in visited or curr > amount:
# continue
# visited.add(curr)
# for coin in coins:
# neighbor = curr + coin
# if neighbor in visited:
# continue
# queue.append((neighbor, step + 1))
# return -1
### method 2: dp
dp = [0] + [None] * amount
for i in range(1, amount + 1):
candidates = list(filter(lambda x: x is not None, [dp[i - c] if i - c >= 0 else None for c in coins]))
dp[i] = min(candidates) + 1 if candidates else None
return dp[amount] if dp[amount] is not None else -1 | 25.811594 | 261 | 0.567097 |
import collections
class Solution:
def coinChange(self, coins: List[int], amount: int) -> int:
amount
for i in range(1, amount + 1):
candidates = list(filter(lambda x: x is not None, [dp[i - c] if i - c >= 0 else None for c in coins]))
dp[i] = min(candidates) + 1 if candidates else None
return dp[amount] if dp[amount] is not None else -1 | true | true |
f73e525efe3821bc206c38a268ae89fda49c3dc4 | 9,618 | py | Python | homeassistant/components/octoprint/__init__.py | billyburly/home-assistant | 9795449d22783e77a0ca7b745f15c89a830c5cc6 | [
"Apache-2.0"
] | 5 | 2020-09-17T10:48:51.000Z | 2021-11-22T00:08:17.000Z | homeassistant/components/octoprint/__init__.py | billyburly/home-assistant | 9795449d22783e77a0ca7b745f15c89a830c5cc6 | [
"Apache-2.0"
] | 9 | 2022-01-27T06:32:10.000Z | 2022-03-31T07:07:51.000Z | homeassistant/components/octoprint/__init__.py | billyburly/home-assistant | 9795449d22783e77a0ca7b745f15c89a830c5cc6 | [
"Apache-2.0"
] | 6 | 2019-12-01T19:06:52.000Z | 2020-09-17T00:57:06.000Z | """Support for monitoring OctoPrint 3D printers."""
import logging
import time
from aiohttp.hdrs import CONTENT_TYPE
import requests
import voluptuous as vol
from homeassistant.components.discovery import SERVICE_OCTOPRINT
from homeassistant.const import (
CONF_API_KEY,
CONF_BINARY_SENSORS,
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PATH,
CONF_PORT,
CONF_SENSORS,
CONF_SSL,
CONTENT_TYPE_JSON,
TEMP_CELSIUS,
)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from homeassistant.util import slugify as util_slugify
_LOGGER = logging.getLogger(__name__)
CONF_BED = "bed"
CONF_NUMBER_OF_TOOLS = "number_of_tools"
DEFAULT_NAME = "OctoPrint"
DOMAIN = "octoprint"
def has_all_unique_names(value):
"""Validate that printers have an unique name."""
names = [util_slugify(printer["name"]) for printer in value]
vol.Schema(vol.Unique())(names)
return value
def ensure_valid_path(value):
"""Validate the path, ensuring it starts and ends with a /."""
vol.Schema(cv.string)(value)
if value[0] != "/":
value = f"/{value}"
if value[-1] != "/":
value += "/"
return value
BINARY_SENSOR_TYPES = {
# API Endpoint, Group, Key, unit
"Printing": ["printer", "state", "printing", None],
"Printing Error": ["printer", "state", "error", None],
}
BINARY_SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(
CONF_MONITORED_CONDITIONS, default=list(BINARY_SENSOR_TYPES)
): vol.All(cv.ensure_list, [vol.In(BINARY_SENSOR_TYPES)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
SENSOR_TYPES = {
# API Endpoint, Group, Key, unit, icon
"Temperatures": ["printer", "temperature", "*", TEMP_CELSIUS],
"Current State": ["printer", "state", "text", None, "mdi:printer-3d"],
"Job Percentage": ["job", "progress", "completion", "%", "mdi:file-percent"],
"Time Remaining": ["job", "progress", "printTimeLeft", "seconds", "mdi:clock-end"],
"Time Elapsed": ["job", "progress", "printTime", "seconds", "mdi:clock-start"],
}
SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_PORT, default=80): cv.port,
vol.Optional(CONF_PATH, default="/"): ensure_valid_path,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_NUMBER_OF_TOOLS, default=0): cv.positive_int,
vol.Optional(CONF_BED, default=False): cv.boolean,
vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA,
vol.Optional(
CONF_BINARY_SENSORS, default={}
): BINARY_SENSOR_SCHEMA,
}
)
],
has_all_unique_names,
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up the OctoPrint component."""
printers = hass.data[DOMAIN] = {}
success = False
def device_discovered(service, info):
"""Get called when an Octoprint server has been discovered."""
_LOGGER.debug("Found an Octoprint server: %s", info)
discovery.listen(hass, SERVICE_OCTOPRINT, device_discovered)
if DOMAIN not in config:
# Skip the setup if there is no configuration present
return True
for printer in config[DOMAIN]:
name = printer[CONF_NAME]
ssl = "s" if printer[CONF_SSL] else ""
base_url = "http{}://{}:{}{}api/".format(
ssl, printer[CONF_HOST], printer[CONF_PORT], printer[CONF_PATH]
)
api_key = printer[CONF_API_KEY]
number_of_tools = printer[CONF_NUMBER_OF_TOOLS]
bed = printer[CONF_BED]
try:
octoprint_api = OctoPrintAPI(base_url, api_key, bed, number_of_tools)
printers[base_url] = octoprint_api
octoprint_api.get("printer")
octoprint_api.get("job")
except requests.exceptions.RequestException as conn_err:
_LOGGER.error("Error setting up OctoPrint API: %r", conn_err)
continue
sensors = printer[CONF_SENSORS][CONF_MONITORED_CONDITIONS]
load_platform(
hass,
"sensor",
DOMAIN,
{"name": name, "base_url": base_url, "sensors": sensors},
config,
)
b_sensors = printer[CONF_BINARY_SENSORS][CONF_MONITORED_CONDITIONS]
load_platform(
hass,
"binary_sensor",
DOMAIN,
{"name": name, "base_url": base_url, "sensors": b_sensors},
config,
)
success = True
return success
class OctoPrintAPI:
"""Simple JSON wrapper for OctoPrint's API."""
def __init__(self, api_url, key, bed, number_of_tools):
"""Initialize OctoPrint API and set headers needed later."""
self.api_url = api_url
self.headers = {CONTENT_TYPE: CONTENT_TYPE_JSON, "X-Api-Key": key}
self.printer_last_reading = [{}, None]
self.job_last_reading = [{}, None]
self.job_available = False
self.printer_available = False
self.available = False
self.printer_error_logged = False
self.job_error_logged = False
self.bed = bed
self.number_of_tools = number_of_tools
def get_tools(self):
"""Get the list of tools that temperature is monitored on."""
tools = []
if self.number_of_tools > 0:
for tool_number in range(0, self.number_of_tools):
tools.append(f"tool{tool_number!s}")
if self.bed:
tools.append("bed")
if not self.bed and self.number_of_tools == 0:
temps = self.printer_last_reading[0].get("temperature")
if temps is not None:
tools = temps.keys()
return tools
def get(self, endpoint):
"""Send a get request, and return the response as a dict."""
# Only query the API at most every 30 seconds
now = time.time()
if endpoint == "job":
last_time = self.job_last_reading[1]
if last_time is not None:
if now - last_time < 30.0:
return self.job_last_reading[0]
elif endpoint == "printer":
last_time = self.printer_last_reading[1]
if last_time is not None:
if now - last_time < 30.0:
return self.printer_last_reading[0]
url = self.api_url + endpoint
try:
response = requests.get(url, headers=self.headers, timeout=9)
response.raise_for_status()
if endpoint == "job":
self.job_last_reading[0] = response.json()
self.job_last_reading[1] = time.time()
self.job_available = True
elif endpoint == "printer":
self.printer_last_reading[0] = response.json()
self.printer_last_reading[1] = time.time()
self.printer_available = True
self.available = self.printer_available and self.job_available
if self.available:
self.job_error_logged = False
self.printer_error_logged = False
return response.json()
except Exception as conn_exc: # pylint: disable=broad-except
log_string = "Failed to update OctoPrint status. Error: %s" % conn_exc
# Only log the first failure
if endpoint == "job":
log_string = f"Endpoint: job {log_string}"
if not self.job_error_logged:
_LOGGER.error(log_string)
self.job_error_logged = True
self.job_available = False
elif endpoint == "printer":
log_string = f"Endpoint: printer {log_string}"
if not self.printer_error_logged:
_LOGGER.error(log_string)
self.printer_error_logged = True
self.printer_available = False
self.available = False
return None
def update(self, sensor_type, end_point, group, tool=None):
"""Return the value for sensor_type from the provided endpoint."""
response = self.get(end_point)
if response is not None:
return get_value_from_json(response, sensor_type, group, tool)
return response
def get_value_from_json(json_dict, sensor_type, group, tool):
"""Return the value for sensor_type from the JSON."""
if group not in json_dict:
return None
if sensor_type in json_dict[group]:
if sensor_type == "target" and json_dict[sensor_type] is None:
return 0
return json_dict[group][sensor_type]
if tool is not None:
if sensor_type in json_dict[group][tool]:
return json_dict[group][tool][sensor_type]
return None
| 35.10219 | 87 | 0.597525 | import logging
import time
from aiohttp.hdrs import CONTENT_TYPE
import requests
import voluptuous as vol
from homeassistant.components.discovery import SERVICE_OCTOPRINT
from homeassistant.const import (
CONF_API_KEY,
CONF_BINARY_SENSORS,
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PATH,
CONF_PORT,
CONF_SENSORS,
CONF_SSL,
CONTENT_TYPE_JSON,
TEMP_CELSIUS,
)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from homeassistant.util import slugify as util_slugify
_LOGGER = logging.getLogger(__name__)
CONF_BED = "bed"
CONF_NUMBER_OF_TOOLS = "number_of_tools"
DEFAULT_NAME = "OctoPrint"
DOMAIN = "octoprint"
def has_all_unique_names(value):
names = [util_slugify(printer["name"]) for printer in value]
vol.Schema(vol.Unique())(names)
return value
def ensure_valid_path(value):
vol.Schema(cv.string)(value)
if value[0] != "/":
value = f"/{value}"
if value[-1] != "/":
value += "/"
return value
BINARY_SENSOR_TYPES = {
"Printing": ["printer", "state", "printing", None],
"Printing Error": ["printer", "state", "error", None],
}
BINARY_SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(
CONF_MONITORED_CONDITIONS, default=list(BINARY_SENSOR_TYPES)
): vol.All(cv.ensure_list, [vol.In(BINARY_SENSOR_TYPES)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
SENSOR_TYPES = {
"Temperatures": ["printer", "temperature", "*", TEMP_CELSIUS],
"Current State": ["printer", "state", "text", None, "mdi:printer-3d"],
"Job Percentage": ["job", "progress", "completion", "%", "mdi:file-percent"],
"Time Remaining": ["job", "progress", "printTimeLeft", "seconds", "mdi:clock-end"],
"Time Elapsed": ["job", "progress", "printTime", "seconds", "mdi:clock-start"],
}
SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_PORT, default=80): cv.port,
vol.Optional(CONF_PATH, default="/"): ensure_valid_path,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_NUMBER_OF_TOOLS, default=0): cv.positive_int,
vol.Optional(CONF_BED, default=False): cv.boolean,
vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA,
vol.Optional(
CONF_BINARY_SENSORS, default={}
): BINARY_SENSOR_SCHEMA,
}
)
],
has_all_unique_names,
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
printers = hass.data[DOMAIN] = {}
success = False
def device_discovered(service, info):
_LOGGER.debug("Found an Octoprint server: %s", info)
discovery.listen(hass, SERVICE_OCTOPRINT, device_discovered)
if DOMAIN not in config:
return True
for printer in config[DOMAIN]:
name = printer[CONF_NAME]
ssl = "s" if printer[CONF_SSL] else ""
base_url = "http{}://{}:{}{}api/".format(
ssl, printer[CONF_HOST], printer[CONF_PORT], printer[CONF_PATH]
)
api_key = printer[CONF_API_KEY]
number_of_tools = printer[CONF_NUMBER_OF_TOOLS]
bed = printer[CONF_BED]
try:
octoprint_api = OctoPrintAPI(base_url, api_key, bed, number_of_tools)
printers[base_url] = octoprint_api
octoprint_api.get("printer")
octoprint_api.get("job")
except requests.exceptions.RequestException as conn_err:
_LOGGER.error("Error setting up OctoPrint API: %r", conn_err)
continue
sensors = printer[CONF_SENSORS][CONF_MONITORED_CONDITIONS]
load_platform(
hass,
"sensor",
DOMAIN,
{"name": name, "base_url": base_url, "sensors": sensors},
config,
)
b_sensors = printer[CONF_BINARY_SENSORS][CONF_MONITORED_CONDITIONS]
load_platform(
hass,
"binary_sensor",
DOMAIN,
{"name": name, "base_url": base_url, "sensors": b_sensors},
config,
)
success = True
return success
class OctoPrintAPI:
def __init__(self, api_url, key, bed, number_of_tools):
self.api_url = api_url
self.headers = {CONTENT_TYPE: CONTENT_TYPE_JSON, "X-Api-Key": key}
self.printer_last_reading = [{}, None]
self.job_last_reading = [{}, None]
self.job_available = False
self.printer_available = False
self.available = False
self.printer_error_logged = False
self.job_error_logged = False
self.bed = bed
self.number_of_tools = number_of_tools
def get_tools(self):
tools = []
if self.number_of_tools > 0:
for tool_number in range(0, self.number_of_tools):
tools.append(f"tool{tool_number!s}")
if self.bed:
tools.append("bed")
if not self.bed and self.number_of_tools == 0:
temps = self.printer_last_reading[0].get("temperature")
if temps is not None:
tools = temps.keys()
return tools
def get(self, endpoint):
now = time.time()
if endpoint == "job":
last_time = self.job_last_reading[1]
if last_time is not None:
if now - last_time < 30.0:
return self.job_last_reading[0]
elif endpoint == "printer":
last_time = self.printer_last_reading[1]
if last_time is not None:
if now - last_time < 30.0:
return self.printer_last_reading[0]
url = self.api_url + endpoint
try:
response = requests.get(url, headers=self.headers, timeout=9)
response.raise_for_status()
if endpoint == "job":
self.job_last_reading[0] = response.json()
self.job_last_reading[1] = time.time()
self.job_available = True
elif endpoint == "printer":
self.printer_last_reading[0] = response.json()
self.printer_last_reading[1] = time.time()
self.printer_available = True
self.available = self.printer_available and self.job_available
if self.available:
self.job_error_logged = False
self.printer_error_logged = False
return response.json()
except Exception as conn_exc:
log_string = "Failed to update OctoPrint status. Error: %s" % conn_exc
if endpoint == "job":
log_string = f"Endpoint: job {log_string}"
if not self.job_error_logged:
_LOGGER.error(log_string)
self.job_error_logged = True
self.job_available = False
elif endpoint == "printer":
log_string = f"Endpoint: printer {log_string}"
if not self.printer_error_logged:
_LOGGER.error(log_string)
self.printer_error_logged = True
self.printer_available = False
self.available = False
return None
def update(self, sensor_type, end_point, group, tool=None):
response = self.get(end_point)
if response is not None:
return get_value_from_json(response, sensor_type, group, tool)
return response
def get_value_from_json(json_dict, sensor_type, group, tool):
if group not in json_dict:
return None
if sensor_type in json_dict[group]:
if sensor_type == "target" and json_dict[sensor_type] is None:
return 0
return json_dict[group][sensor_type]
if tool is not None:
if sensor_type in json_dict[group][tool]:
return json_dict[group][tool][sensor_type]
return None
| true | true |
f73e526d89710d8df3afc67a77c680cc3eba815e | 13,541 | py | Python | technical_deployment/train_model/imdb_data.py | ChaplinMarchais/cortana-intelligence-product-detection-from-images | a28894b2eeb1b8397d84286f66bdc8f947e543b4 | [
"MIT"
] | 1 | 2018-05-14T05:26:36.000Z | 2018-05-14T05:26:36.000Z | technical_deployment/train_model/imdb_data.py | Thirapat/cortana-intelligence-product-detection-from-images | 10077cb022b95239064944ec647888c86ca6aca9 | [
"MIT"
] | 4 | 2021-06-08T23:55:34.000Z | 2022-03-12T00:55:55.000Z | technical_deployment/train_model/imdb_data.py | isabella232/cortana-intelligence-product-detection-from-images | 2e5370098f9f83cd27cdaba2eab675f3c30ae157 | [
"MIT"
] | 3 | 2018-04-11T18:15:11.000Z | 2019-10-15T13:59:54.000Z | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
from __future__ import print_function
from builtins import range
import sys, os
from helpers import *
import scipy.sparse
import scipy.io as sio
import pickle as cp
import numpy as np
import fastRCNN
class imdb_data(fastRCNN.imdb):
def __init__(self, image_set, classes, maxNrRois, imgDir, roiDir, cacheDir, boAddGroundTruthRois):
fastRCNN.imdb.__init__(self, image_set + ".cache") #'data_' + image_set)
self._image_set = image_set
self._maxNrRois = maxNrRois
self._imgDir = imgDir
self._roiDir = roiDir
self._cacheDir = cacheDir #cache_path
self._imgSubdirs ={'train': ['positive', 'negative'], 'test': ['testImages']}
self._classes = classes
self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))
self._image_ext = '.jpg'
self._image_index, self._image_subdirs = self._load_image_set_index()
self._roidb_handler = self.selective_search_roidb
self._boAddGroundTruthRois = boAddGroundTruthRois
#overwrite parent definition
@property
def cache_path(self):
return self._cacheDir
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_subdirs[i], self._image_index[i])
def image_path_from_index(self, subdir, fname):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._imgDir, subdir, fname)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Compile list of image indices and the subdirectories they are in.
"""
image_index = []
image_subdirs = []
for subdir in self._imgSubdirs[self._image_set]:
imgFilenames = getFilesInDirectory(os.path.join(self._imgDir,subdir), self._image_ext)
image_index += imgFilenames
image_subdirs += [subdir] * len(imgFilenames)
return image_index, image_subdirs
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cp.load(fid)
print ('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_annotation(i) for i in range(self.num_images)]
with open(cache_file, 'wb') as fid:
cp.dump(gt_roidb, fid, cp.HIGHEST_PROTOCOL)
print ('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def selective_search_roidb(self):
"""
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
self.name + '_selective_search_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
if sys.version_info[0] < 3:
roidb = cp.load(fid)
else:
roidb = cp.load(fid, encoding='latin1')
print ('{} ss roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = self.gt_roidb()
ss_roidb = self._load_selective_search_roidb(gt_roidb)
#add ground truth ROIs
if self._boAddGroundTruthRois:
roidb = self.merge_roidbs(gt_roidb, ss_roidb)
else:
roidb = ss_roidb
#Keep max of e.g. 2000 rois
if self._maxNrRois and self._maxNrRois > 0:
print ("Only keeping the first %d ROIs.." % self._maxNrRois)
for i in range(self.num_images):
gt_overlaps = roidb[i]['gt_overlaps']
gt_overlaps = gt_overlaps.todense()[:self._maxNrRois]
gt_overlaps = scipy.sparse.csr_matrix(gt_overlaps)
roidb[i]['gt_overlaps'] = gt_overlaps
roidb[i]['boxes'] = roidb[i]['boxes'][:self._maxNrRois,:]
roidb[i]['gt_classes'] = roidb[i]['gt_classes'][:self._maxNrRois]
with open(cache_file, 'wb') as fid:
cp.dump(roidb, fid, cp.HIGHEST_PROTOCOL)
print ('wrote ss roidb to {}'.format(cache_file))
return roidb
def _load_selective_search_roidb(self, gt_roidb):
# box_list = nrImages x nrBoxes x 4
box_list = []
for imgFilename, subdir in zip(self._image_index, self._image_subdirs):
roiPath = "{}/{}/{}.roi.txt".format(self._roiDir, subdir, imgFilename[:-4])
assert os.path.exists(roiPath), "Error: rois file not found: " + roiPath
rois = np.loadtxt(roiPath, np.int32)
box_list.append(rois)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_annotation(self, imgIndex):
"""
Load image and bounding boxes info from human annotations.
"""
#negative images do not have any ground truth annotations
if self._image_subdirs[imgIndex].lower() == "negative":
return None
imgPath = self.image_path_at(imgIndex)
bboxesPaths = imgPath[:-4] + ".bboxes.tsv"
labelsPaths = imgPath[:-4] + ".bboxes.labels.tsv"
assert os.path.exists(bboxesPaths), "Error: ground truth bounding boxes file not found: " + bboxesPaths
assert os.path.exists(labelsPaths), "Error: ground truth labels file not found: " + bboxesPaths
bboxes = np.loadtxt(bboxesPaths, np.float32)
labels = readFile(labelsPaths)
# in case there's only one annotation and numpy read the array as single array,
# we need to make sure the input is treated as a multi dimensional array instead of a list/ 1D array
#if len(bboxes.shape) == 1:
if len(bboxes)>0 and type(bboxes[0]) == np.float32:
bboxes = np.array([bboxes])
#remove boxes marked as 'undecided' or 'exclude'
indicesToKeep = find(labels, lambda x: x!='EXCLUDE' and x!='UNDECIDED')
bboxes = [bboxes[i] for i in indicesToKeep]
labels = [labels[i] for i in indicesToKeep]
# Load object bounding boxes into a data frame.
num_objs = len(bboxes)
boxes = np.zeros((num_objs,4), dtype=np.uint16)
gt_classes = np.zeros(num_objs, dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
for bboxIndex,(bbox,label) in enumerate(zip(bboxes,labels)):
cls = self._class_to_ind[label] #.decode('utf-8')]
boxes[bboxIndex, :] = bbox
gt_classes[bboxIndex] = cls
overlaps[bboxIndex, cls] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes' : boxes,
'gt_classes': gt_classes,
'gt_overlaps' : overlaps,
'flipped' : False}
# main call to compute per-calass average precision
# shape of all_boxes: e.g. 21 classes x 4952 images x 58 rois x 5 coords+score
# (see also test_net() in fastRCNN\test.py)
def evaluate_detections(self, all_boxes, output_dir, use_07_metric=False, overlapThreshold = 0.5):
aps = []
for classIndex, className in enumerate(self._classes):
if className != '__background__':
rec, prec, ap = self._evaluate_detections(classIndex, all_boxes, use_07_metric, overlapThreshold)
aps += [[className,ap]]
print('AP for {:>15} = {:.4f}'.format(className, ap))
print('Mean AP = {:.4f}'.format(np.nanmean(getColumn(aps,1))))
return aps
def _evaluate_detections(self, classIndex, all_boxes, use_07_metric = False, overlapThreshold = 0.5):
"""
Top level function that does the PASCAL VOC evaluation.
[overlapThreshold]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation (default False)
"""
assert (len(all_boxes) == self.num_classes)
assert (len(all_boxes[0]) == self.num_images)
# load ground truth annotations for this class
gtInfos = []
for imgIndex in range(self.num_images):
imgPath = self.image_path_at(imgIndex)
imgSubir = os.path.normpath(imgPath).split(os.path.sep)[-2]
if imgSubir != 'negative':
gtBoxes, gtLabels = readGtAnnotation(imgPath)
gtBoxes = [box for box, label in zip(gtBoxes, gtLabels) if label == self.classes[classIndex]] #.decode('utf-8')
else:
gtBoxes = []
gtInfos.append({'bbox': np.array(gtBoxes),
'difficult': [False] * len(gtBoxes),
'det': [False] * len(gtBoxes)})
# parse detections for this class
# shape of all_boxes: e.g. 21 classes x 4952 images x 58 rois x 5 coords+score
detBboxes = []
detImgIndices = []
detConfidences = []
for imgIndex in range(self.num_images):
dets = all_boxes[classIndex][imgIndex]
if dets != []:
for k in range(dets.shape[0]):
detImgIndices.append(imgIndex)
detConfidences.append(dets[k, -1])
# the VOCdevkit expects 1-based indices
detBboxes.append([dets[k, 0] + 1, dets[k, 1] + 1, dets[k, 2] + 1, dets[k, 3] + 1])
detBboxes = np.array(detBboxes)
detConfidences = np.array(detConfidences)
# debug: visualize GT and detections
# if classIndex == 15: # and imgPath.endswith("WIN_20160803_11_42_36_Pro.jpg"):
# imgIndex = 6
# imgPath = self.image_path_at(imgIndex)
# img = imread(imgPath)
# tmp_gtBoxes = gtInfos[imgIndex]['bbox']
# inds = np.where(np.array(detImgIndices) == 1)[0]
# tmp_detBoxes = detBboxes[inds]
# print(detConfidences[inds])
# drawRectangles(img, tmp_gtBoxes, color = (255, 0, 0)) #thickness=thickness)
# drawRectangles(img, tmp_detBoxes, color= (0, 255, 0)) # thickness=thickness)
# imshow(img, maxDim=800)
# compute precision / recall / ap
rec, prec, ap = self._voc_computePrecisionRecallAp(
class_recs=gtInfos,
confidence=detConfidences,
image_ids=detImgIndices,
BB=detBboxes,
ovthresh=overlapThreshold,
use_07_metric=use_07_metric)
return rec, prec, ap
#########################################################################
# Python evaluation functions (copied/refactored from faster-RCNN)
##########################################################################
def _voc_computePrecisionRecallAp(self, class_recs, confidence, image_ids, BB, ovthresh=0.5, use_07_metric=False):
# sort by confidence
sorted_ind = np.argsort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
(BBGT[:, 2] - BBGT[:, 0] + 1.) *
(BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
npos = sum([len(cr['bbox']) for cr in class_recs])
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = computeAveragePrecision(rec, prec, use_07_metric)
return rec, prec, ap | 41.922601 | 128 | 0.571819 |
from __future__ import print_function
from builtins import range
import sys, os
from helpers import *
import scipy.sparse
import scipy.io as sio
import pickle as cp
import numpy as np
import fastRCNN
class imdb_data(fastRCNN.imdb):
def __init__(self, image_set, classes, maxNrRois, imgDir, roiDir, cacheDir, boAddGroundTruthRois):
fastRCNN.imdb.__init__(self, image_set + ".cache")
self._image_set = image_set
self._maxNrRois = maxNrRois
self._imgDir = imgDir
self._roiDir = roiDir
self._cacheDir = cacheDir
self._imgSubdirs ={'train': ['positive', 'negative'], 'test': ['testImages']}
self._classes = classes
self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))
self._image_ext = '.jpg'
self._image_index, self._image_subdirs = self._load_image_set_index()
self._roidb_handler = self.selective_search_roidb
self._boAddGroundTruthRois = boAddGroundTruthRois
@property
def cache_path(self):
return self._cacheDir
def image_path_at(self, i):
return self.image_path_from_index(self._image_subdirs[i], self._image_index[i])
def image_path_from_index(self, subdir, fname):
image_path = os.path.join(self._imgDir, subdir, fname)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
image_index = []
image_subdirs = []
for subdir in self._imgSubdirs[self._image_set]:
imgFilenames = getFilesInDirectory(os.path.join(self._imgDir,subdir), self._image_ext)
image_index += imgFilenames
image_subdirs += [subdir] * len(imgFilenames)
return image_index, image_subdirs
def gt_roidb(self):
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cp.load(fid)
print ('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_annotation(i) for i in range(self.num_images)]
with open(cache_file, 'wb') as fid:
cp.dump(gt_roidb, fid, cp.HIGHEST_PROTOCOL)
print ('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def selective_search_roidb(self):
cache_file = os.path.join(self.cache_path,
self.name + '_selective_search_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
if sys.version_info[0] < 3:
roidb = cp.load(fid)
else:
roidb = cp.load(fid, encoding='latin1')
print ('{} ss roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = self.gt_roidb()
ss_roidb = self._load_selective_search_roidb(gt_roidb)
if self._boAddGroundTruthRois:
roidb = self.merge_roidbs(gt_roidb, ss_roidb)
else:
roidb = ss_roidb
if self._maxNrRois and self._maxNrRois > 0:
print ("Only keeping the first %d ROIs.." % self._maxNrRois)
for i in range(self.num_images):
gt_overlaps = roidb[i]['gt_overlaps']
gt_overlaps = gt_overlaps.todense()[:self._maxNrRois]
gt_overlaps = scipy.sparse.csr_matrix(gt_overlaps)
roidb[i]['gt_overlaps'] = gt_overlaps
roidb[i]['boxes'] = roidb[i]['boxes'][:self._maxNrRois,:]
roidb[i]['gt_classes'] = roidb[i]['gt_classes'][:self._maxNrRois]
with open(cache_file, 'wb') as fid:
cp.dump(roidb, fid, cp.HIGHEST_PROTOCOL)
print ('wrote ss roidb to {}'.format(cache_file))
return roidb
def _load_selective_search_roidb(self, gt_roidb):
box_list = []
for imgFilename, subdir in zip(self._image_index, self._image_subdirs):
roiPath = "{}/{}/{}.roi.txt".format(self._roiDir, subdir, imgFilename[:-4])
assert os.path.exists(roiPath), "Error: rois file not found: " + roiPath
rois = np.loadtxt(roiPath, np.int32)
box_list.append(rois)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_annotation(self, imgIndex):
if self._image_subdirs[imgIndex].lower() == "negative":
return None
imgPath = self.image_path_at(imgIndex)
bboxesPaths = imgPath[:-4] + ".bboxes.tsv"
labelsPaths = imgPath[:-4] + ".bboxes.labels.tsv"
assert os.path.exists(bboxesPaths), "Error: ground truth bounding boxes file not found: " + bboxesPaths
assert os.path.exists(labelsPaths), "Error: ground truth labels file not found: " + bboxesPaths
bboxes = np.loadtxt(bboxesPaths, np.float32)
labels = readFile(labelsPaths)
# we need to make sure the input is treated as a multi dimensional array instead of a list/ 1D array
#if len(bboxes.shape) == 1:
if len(bboxes)>0 and type(bboxes[0]) == np.float32:
bboxes = np.array([bboxes])
#remove boxes marked as 'undecided' or 'exclude'
indicesToKeep = find(labels, lambda x: x!='EXCLUDE' and x!='UNDECIDED')
bboxes = [bboxes[i] for i in indicesToKeep]
labels = [labels[i] for i in indicesToKeep]
# Load object bounding boxes into a data frame.
num_objs = len(bboxes)
boxes = np.zeros((num_objs,4), dtype=np.uint16)
gt_classes = np.zeros(num_objs, dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
for bboxIndex,(bbox,label) in enumerate(zip(bboxes,labels)):
cls = self._class_to_ind[label] #.decode('utf-8')]
boxes[bboxIndex, :] = bbox
gt_classes[bboxIndex] = cls
overlaps[bboxIndex, cls] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes' : boxes,
'gt_classes': gt_classes,
'gt_overlaps' : overlaps,
'flipped' : False}
# main call to compute per-calass average precision
# shape of all_boxes: e.g. 21 classes x 4952 images x 58 rois x 5 coords+score
# (see also test_net() in fastRCNN\test.py)
def evaluate_detections(self, all_boxes, output_dir, use_07_metric=False, overlapThreshold = 0.5):
aps = []
for classIndex, className in enumerate(self._classes):
if className != '__background__':
rec, prec, ap = self._evaluate_detections(classIndex, all_boxes, use_07_metric, overlapThreshold)
aps += [[className,ap]]
print('AP for {:>15} = {:.4f}'.format(className, ap))
print('Mean AP = {:.4f}'.format(np.nanmean(getColumn(aps,1))))
return aps
def _evaluate_detections(self, classIndex, all_boxes, use_07_metric = False, overlapThreshold = 0.5):
assert (len(all_boxes) == self.num_classes)
assert (len(all_boxes[0]) == self.num_images)
# load ground truth annotations for this class
gtInfos = []
for imgIndex in range(self.num_images):
imgPath = self.image_path_at(imgIndex)
imgSubir = os.path.normpath(imgPath).split(os.path.sep)[-2]
if imgSubir != 'negative':
gtBoxes, gtLabels = readGtAnnotation(imgPath)
gtBoxes = [box for box, label in zip(gtBoxes, gtLabels) if label == self.classes[classIndex]] #.decode('utf-8')
else:
gtBoxes = []
gtInfos.append({'bbox': np.array(gtBoxes),
'difficult': [False] * len(gtBoxes),
'det': [False] * len(gtBoxes)})
# parse detections for this class
# shape of all_boxes: e.g. 21 classes x 4952 images x 58 rois x 5 coords+score
detBboxes = []
detImgIndices = []
detConfidences = []
for imgIndex in range(self.num_images):
dets = all_boxes[classIndex][imgIndex]
if dets != []:
for k in range(dets.shape[0]):
detImgIndices.append(imgIndex)
detConfidences.append(dets[k, -1])
# the VOCdevkit expects 1-based indices
detBboxes.append([dets[k, 0] + 1, dets[k, 1] + 1, dets[k, 2] + 1, dets[k, 3] + 1])
detBboxes = np.array(detBboxes)
detConfidences = np.array(detConfidences)
# debug: visualize GT and detections
# if classIndex == 15: # and imgPath.endswith("WIN_20160803_11_42_36_Pro.jpg"):
# imgIndex = 6
# imgPath = self.image_path_at(imgIndex)
# img = imread(imgPath)
# tmp_gtBoxes = gtInfos[imgIndex]['bbox']
# inds = np.where(np.array(detImgIndices) == 1)[0]
# tmp_detBoxes = detBboxes[inds]
# print(detConfidences[inds])
# drawRectangles(img, tmp_gtBoxes, color = (255, 0, 0)) #thickness=thickness)
# drawRectangles(img, tmp_detBoxes, color= (0, 255, 0)) # thickness=thickness)
# imshow(img, maxDim=800)
# compute precision / recall / ap
rec, prec, ap = self._voc_computePrecisionRecallAp(
class_recs=gtInfos,
confidence=detConfidences,
image_ids=detImgIndices,
BB=detBboxes,
ovthresh=overlapThreshold,
use_07_metric=use_07_metric)
return rec, prec, ap
#########################################################################
# Python evaluation functions (copied/refactored from faster-RCNN)
##########################################################################
def _voc_computePrecisionRecallAp(self, class_recs, confidence, image_ids, BB, ovthresh=0.5, use_07_metric=False):
# sort by confidence
sorted_ind = np.argsort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
(BBGT[:, 2] - BBGT[:, 0] + 1.) *
(BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
npos = sum([len(cr['bbox']) for cr in class_recs])
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = computeAveragePrecision(rec, prec, use_07_metric)
return rec, prec, ap | true | true |
f73e530ee5e971492182a88af77326412c346bd2 | 638 | py | Python | ch2o/tests/syntax/MultiFunction.py | disktnk/chainer-compiler | 5cfd027b40ea6e4abf73eb42be70b4fba74d1cde | [
"MIT"
] | null | null | null | ch2o/tests/syntax/MultiFunction.py | disktnk/chainer-compiler | 5cfd027b40ea6e4abf73eb42be70b4fba74d1cde | [
"MIT"
] | null | null | null | ch2o/tests/syntax/MultiFunction.py | disktnk/chainer-compiler | 5cfd027b40ea6e4abf73eb42be70b4fba74d1cde | [
"MIT"
] | null | null | null | # coding: utf-8
import chainer
import chainer.links as L
# Network definition
class A(chainer.Chain):
def __init__(self):
super(A, self).__init__()
with self.init_scope():
self.l0 = L.Linear(7)
self.l1 = L.Linear(5)
def g(self, y):
return self.l1(y)
def forward(sl, x):
x1 = sl.l0(x)
x2 = sl.g(x1)
return x2
# ======================================
import ch2o
if __name__ == '__main__':
import numpy as np
np.random.seed(314)
model = A()
v = np.random.rand(10, 20).astype(np.float32)
ch2o.generate_testcase(model, [v])
| 16.358974 | 49 | 0.529781 |
import chainer
import chainer.links as L
class A(chainer.Chain):
def __init__(self):
super(A, self).__init__()
with self.init_scope():
self.l0 = L.Linear(7)
self.l1 = L.Linear(5)
def g(self, y):
return self.l1(y)
def forward(sl, x):
x1 = sl.l0(x)
x2 = sl.g(x1)
return x2
import ch2o
if __name__ == '__main__':
import numpy as np
np.random.seed(314)
model = A()
v = np.random.rand(10, 20).astype(np.float32)
ch2o.generate_testcase(model, [v])
| true | true |
f73e54b40868479e304268404d758135e938e86b | 3,320 | py | Python | homeworks/ask/generate_training_data.py | jsedoc/nn_chatbot | 7b4406687bad2efa14658cb5aa137065cd325073 | [
"MIT"
] | 4 | 2016-11-02T16:39:25.000Z | 2021-06-13T20:29:19.000Z | homeworks/ask/generate_training_data.py | jsedoc/nn_chatbot | 7b4406687bad2efa14658cb5aa137065cd325073 | [
"MIT"
] | null | null | null | homeworks/ask/generate_training_data.py | jsedoc/nn_chatbot | 7b4406687bad2efa14658cb5aa137065cd325073 | [
"MIT"
] | 3 | 2017-06-09T10:30:22.000Z | 2020-02-25T02:29:58.000Z | from __future__ import print_function
import readline
import json
import re
from .config.config import read_from_user
from intent_schema import IntentSchema
from argparse import ArgumentParser
def print_description(intent):
print ("<> Enter data for <{intent}> OR Press enter with empty string to move onto next intent"
.format(intent=intent["intent"]))
print ("<> Enter '<' to delete last training utterance")
print ("<> Sample utterance to remind you of the format:")
print (">> what is the recipe for {ravioli|Food} ?")
if len(intent["slots"]) > 0:
print ("<> Available slots for this intent")
for slot in intent["slots"]:
print (" - - ", slot["name"], "<TYPE: {}>".format(slot["type"]))
def validate_input_format(utterance, intent):
""" TODO add handling for bad input"""
slots = {slot["name"] for slot in intent["slots"]}
split_utt = re.split("{(.*)}", utterance)
banned = set("-/\\()^%$#@~`-_=+><;:")
for token in split_utt:
if (banned & set(token)):
print (" - Banned character found in substring", token)
print (" - Banned character list", banned)
return False
if "|" in token:
split_token = token.split("|")
if len(split_token)!=2:
print (" - Error, token is incorrect in", token, split_token)
return False
word, slot = split_token
if slot.strip() not in slots:
print (" -", slot, "is not a valid slot for this Intent, valid slots are", slots)
return False
return True
def lowercase_utterance(utterance):
split_utt = re.split("({.*})", utterance)
def lower_case_split(token):
if "|" in token:
phrase, slot = token.split("|")
return "|".join([phrase.strip().lower(), slot.strip()])
else:
return token.lower()
return " ".join([lower_case_split(token) for token in split_utt])
def generate_training_data(schema):
print ("Loaded intent schema, populating intents")
training_data = []
for intent in schema.get_intents():
print_description(intent)
keep_prompting = True
while keep_prompting:
utterance = read_from_user(str,
str(len(training_data))+". "+intent["intent"]+'\t')
if utterance.strip() == "":
keep_prompting = False
elif utterance.strip() == "<":
print (" - Discarded utterance: ", training_data.pop())
elif validate_input_format(utterance, intent):
training_data.append("\t".join([intent["intent"], lowercase_utterance(utterance)]))
else:
print (" - Discarded utterance:", utterance)
return training_data
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--intent_schema', '-i', required=True)
parser.add_argument('--output', '-o', default='utterances.txt')
args = parser.parse_args()
intent_schema = IntentSchema.from_filename(args.intent_schema)
with open(args.output, 'w') as utterance_file:
utterance_file.write("\n".join(generate_training_data(intent_schema)))
| 39.058824 | 99 | 0.591265 | from __future__ import print_function
import readline
import json
import re
from .config.config import read_from_user
from intent_schema import IntentSchema
from argparse import ArgumentParser
def print_description(intent):
print ("<> Enter data for <{intent}> OR Press enter with empty string to move onto next intent"
.format(intent=intent["intent"]))
print ("<> Enter '<' to delete last training utterance")
print ("<> Sample utterance to remind you of the format:")
print (">> what is the recipe for {ravioli|Food} ?")
if len(intent["slots"]) > 0:
print ("<> Available slots for this intent")
for slot in intent["slots"]:
print (" - - ", slot["name"], "<TYPE: {}>".format(slot["type"]))
def validate_input_format(utterance, intent):
slots = {slot["name"] for slot in intent["slots"]}
split_utt = re.split("{(.*)}", utterance)
banned = set("-/\\()^%$#@~`-_=+><;:")
for token in split_utt:
if (banned & set(token)):
print (" - Banned character found in substring", token)
print (" - Banned character list", banned)
return False
if "|" in token:
split_token = token.split("|")
if len(split_token)!=2:
print (" - Error, token is incorrect in", token, split_token)
return False
word, slot = split_token
if slot.strip() not in slots:
print (" -", slot, "is not a valid slot for this Intent, valid slots are", slots)
return False
return True
def lowercase_utterance(utterance):
split_utt = re.split("({.*})", utterance)
def lower_case_split(token):
if "|" in token:
phrase, slot = token.split("|")
return "|".join([phrase.strip().lower(), slot.strip()])
else:
return token.lower()
return " ".join([lower_case_split(token) for token in split_utt])
def generate_training_data(schema):
print ("Loaded intent schema, populating intents")
training_data = []
for intent in schema.get_intents():
print_description(intent)
keep_prompting = True
while keep_prompting:
utterance = read_from_user(str,
str(len(training_data))+". "+intent["intent"]+'\t')
if utterance.strip() == "":
keep_prompting = False
elif utterance.strip() == "<":
print (" - Discarded utterance: ", training_data.pop())
elif validate_input_format(utterance, intent):
training_data.append("\t".join([intent["intent"], lowercase_utterance(utterance)]))
else:
print (" - Discarded utterance:", utterance)
return training_data
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--intent_schema', '-i', required=True)
parser.add_argument('--output', '-o', default='utterances.txt')
args = parser.parse_args()
intent_schema = IntentSchema.from_filename(args.intent_schema)
with open(args.output, 'w') as utterance_file:
utterance_file.write("\n".join(generate_training_data(intent_schema)))
| true | true |
f73e54b882403131dc7c483d960bce04c041cfbd | 925 | py | Python | test01.py | 888dahong888/open3dTest | cf28df9f9f5d24b1ca614414804a1c18d349467c | [
"Apache-2.0"
] | 1 | 2020-08-05T02:20:01.000Z | 2020-08-05T02:20:01.000Z | test01.py | 888dahong888/open3dTest | cf28df9f9f5d24b1ca614414804a1c18d349467c | [
"Apache-2.0"
] | null | null | null | test01.py | 888dahong888/open3dTest | cf28df9f9f5d24b1ca614414804a1c18d349467c | [
"Apache-2.0"
] | null | null | null | #读写点云,网格,图片文件
import numpy as np
import open3d as o3d
pcd=o3d.io.read_point_cloud("data/rs1.pcd")
print(pcd) #打印点云数量
#可视化一下
o3d.visualization.draw_geometries([pcd])
#下采样
downpcd = pcd.voxel_down_sample(voxel_size=0.05)
o3d.visualization.draw_geometries([downpcd])
#计算法向量
downpcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
o3d.visualization.draw_geometries([downpcd])
#原来这样获取
print("Print a normal vector of the 0th point")
print(downpcd.normals[0])
print("Print the normal vectors of the first 10 points")
print(np.asarray(downpcd.normals)[:10, :])
o3d.io.write_point_cloud("data/copy_rs1.pcd",pcd)
#打印网格
mesh=o3d.io.read_triangle_mesh("data/Box.stl")
o3d.visualization.draw_geometries([mesh])
print(mesh)
o3d.io.write_triangle_mesh("data/copy_box.stl",mesh)
#读写图像
img=o3d.io.read_image('data/image.jpg')
print(img)
o3d.io.write_image("data/copy_img.jpg",img)
| 24.342105 | 98 | 0.776216 |
import numpy as np
import open3d as o3d
pcd=o3d.io.read_point_cloud("data/rs1.pcd")
print(pcd)
o3d.visualization.draw_geometries([pcd])
downpcd = pcd.voxel_down_sample(voxel_size=0.05)
o3d.visualization.draw_geometries([downpcd])
downpcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
o3d.visualization.draw_geometries([downpcd])
print("Print a normal vector of the 0th point")
print(downpcd.normals[0])
print("Print the normal vectors of the first 10 points")
print(np.asarray(downpcd.normals)[:10, :])
o3d.io.write_point_cloud("data/copy_rs1.pcd",pcd)
mesh=o3d.io.read_triangle_mesh("data/Box.stl")
o3d.visualization.draw_geometries([mesh])
print(mesh)
o3d.io.write_triangle_mesh("data/copy_box.stl",mesh)
img=o3d.io.read_image('data/image.jpg')
print(img)
o3d.io.write_image("data/copy_img.jpg",img)
| true | true |
f73e5547a542c0b70784f59c095e0b4d42d6632c | 413 | py | Python | django/contrib/gis/db/backends/postgis/features.py | ni-ning/django | 2e7ba6057cfc82a15a22b6021cd60cf307152e2d | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 2 | 2021-01-10T19:18:21.000Z | 2021-01-11T13:42:42.000Z | django/contrib/gis/db/backends/postgis/features.py | ni-ning/django | 2e7ba6057cfc82a15a22b6021cd60cf307152e2d | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | null | null | null | django/contrib/gis/db/backends/postgis/features.py | ni-ning/django | 2e7ba6057cfc82a15a22b6021cd60cf307152e2d | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | null | null | null | from django.contrib.gis.db.backends.base.features import BaseSpatialFeatures
from django.db.backends.postgresql.features import (
DatabaseFeatures as Psycopg2DatabaseFeatures,
)
class DatabaseFeatures(BaseSpatialFeatures, Psycopg2DatabaseFeatures):
supports_geography = True
supports_3d_storage = True
supports_3d_functions = True
supports_raster = True
supports_empty_geometries = True
| 31.769231 | 76 | 0.811138 | from django.contrib.gis.db.backends.base.features import BaseSpatialFeatures
from django.db.backends.postgresql.features import (
DatabaseFeatures as Psycopg2DatabaseFeatures,
)
class DatabaseFeatures(BaseSpatialFeatures, Psycopg2DatabaseFeatures):
supports_geography = True
supports_3d_storage = True
supports_3d_functions = True
supports_raster = True
supports_empty_geometries = True
| true | true |
f73e557414dd0e9dbae94d56b7866f877271804d | 2,272 | py | Python | pyextension/test.py | rug/robosoc2d | 7a018f8ef6974f96a44df018b8adb185e2c07c63 | [
"MIT"
] | null | null | null | pyextension/test.py | rug/robosoc2d | 7a018f8ef6974f96a44df018b8adb185e2c07c63 | [
"MIT"
] | null | null | null | pyextension/test.py | rug/robosoc2d | 7a018f8ef6974f96a44df018b8adb185e2c07c63 | [
"MIT"
] | null | null | null | import robosoc2d
sim_handle = robosoc2d.build_simpleplayer_simulator([], 4, [], 4)
while robosoc2d.simulator_step_if_playing(sim_handle):
print(robosoc2d.simulator_get_state_string(sim_handle))
print(robosoc2d.simulator_get_state_string(sim_handle))
robosoc2d.simulator_delete_all()
class MyPlayer:
def __init__(self):
self.c=0
def step(self, env, pitch, settings, team1, team2):
print("player step says that's tick per time= "+str(settings.ticks_per_time)+" , internal variable c="+str(self.c))
self.c+=1
action=(robosoc2d.ACTION_DASH, 1.5, 0.06, 0.0)
return action
my_team=[MyPlayer() for n in range(4) ]
sim_handle = robosoc2d.build_simpleplayer_simulator(my_team, 0, [], 4) #, "my team", "simple players team", robosoc2d.get_seed_by_current_time(),sett)
robosoc2d.simulator_step_if_playing(handle=sim_handle)
robosoc2d.simulator_step_if_playing(handle=sim_handle)
print(robosoc2d.simulator_get_state_string(sim_handle))
robosoc2d.simulator_delete(sim_handle)
sett=robosoc2d.get_default_settings()
sett.ticks_per_time=421
sim_handle = robosoc2d.build_simpleplayer_simulator([], 4, [], 4, "The snakes", "Pacific United", robosoc2d.get_seed_by_current_time(), sett)
#sim_handle = robosoc2d.build_simpleplayer_simulator([], 4, [], 4, "The snakes", "Pacific United", game_settings=sett, random_seed=robosoc2d.get_seed_by_current_time())
robosoc2d.simulator_play_game(sim_handle)
simState = robosoc2d.simulator_get_game_state(sim_handle)
print(simState[0])
mydict=eval(str(simState[0]))
print(str(mydict["n_players1"]))
print(simState[1])
mydict=eval(str(simState[1]))
print(str(mydict["goal_kick_rx"]))
print(simState[2])
mydict=eval(str(simState[2]))
print(str(mydict["ticks_per_time"]))
aplayerinfo=simState[3][0]
print(aplayerinfo)
mydict=eval(str(aplayerinfo))
print(str(mydict["direction"]))
print("random seed: "+str(robosoc2d.simulator_get_random_seed(sim_handle)))
print(robosoc2d.simulator_get_team_names(sim_handle))
simState = robosoc2d.simulator_get_game_state(sim_handle)
copiedEnv =simState[0].copy()
copiedEnv.tick=100
myState = robosoc2d.environment()
print(simState[0])
print(copiedEnv)
print(myState)
print(robosoc2d.simulator_is_valid(sim_handle))
print(robosoc2d.simulator_is_valid(4000))
| 37.245902 | 169 | 0.78081 | import robosoc2d
sim_handle = robosoc2d.build_simpleplayer_simulator([], 4, [], 4)
while robosoc2d.simulator_step_if_playing(sim_handle):
print(robosoc2d.simulator_get_state_string(sim_handle))
print(robosoc2d.simulator_get_state_string(sim_handle))
robosoc2d.simulator_delete_all()
class MyPlayer:
def __init__(self):
self.c=0
def step(self, env, pitch, settings, team1, team2):
print("player step says that's tick per time= "+str(settings.ticks_per_time)+" , internal variable c="+str(self.c))
self.c+=1
action=(robosoc2d.ACTION_DASH, 1.5, 0.06, 0.0)
return action
my_team=[MyPlayer() for n in range(4) ]
sim_handle = robosoc2d.build_simpleplayer_simulator(my_team, 0, [], 4) #, "my team", "simple players team", robosoc2d.get_seed_by_current_time(),sett)
robosoc2d.simulator_step_if_playing(handle=sim_handle)
robosoc2d.simulator_step_if_playing(handle=sim_handle)
print(robosoc2d.simulator_get_state_string(sim_handle))
robosoc2d.simulator_delete(sim_handle)
sett=robosoc2d.get_default_settings()
sett.ticks_per_time=421
sim_handle = robosoc2d.build_simpleplayer_simulator([], 4, [], 4, "The snakes", "Pacific United", robosoc2d.get_seed_by_current_time(), sett)
#sim_handle = robosoc2d.build_simpleplayer_simulator([], 4, [], 4, "The snakes", "Pacific United", game_settings=sett, random_seed=robosoc2d.get_seed_by_current_time())
robosoc2d.simulator_play_game(sim_handle)
simState = robosoc2d.simulator_get_game_state(sim_handle)
print(simState[0])
mydict=eval(str(simState[0]))
print(str(mydict["n_players1"]))
print(simState[1])
mydict=eval(str(simState[1]))
print(str(mydict["goal_kick_rx"]))
print(simState[2])
mydict=eval(str(simState[2]))
print(str(mydict["ticks_per_time"]))
aplayerinfo=simState[3][0]
print(aplayerinfo)
mydict=eval(str(aplayerinfo))
print(str(mydict["direction"]))
print("random seed: "+str(robosoc2d.simulator_get_random_seed(sim_handle)))
print(robosoc2d.simulator_get_team_names(sim_handle))
simState = robosoc2d.simulator_get_game_state(sim_handle)
copiedEnv =simState[0].copy()
copiedEnv.tick=100
myState = robosoc2d.environment()
print(simState[0])
print(copiedEnv)
print(myState)
print(robosoc2d.simulator_is_valid(sim_handle))
print(robosoc2d.simulator_is_valid(4000))
| true | true |
f73e56c59209c3271c0651d49f017a03f54bbf3b | 342 | py | Python | v1/addresses/serializers.py | DucPhamTV/MaiTet | 44a1465a3239808f6640592ba666d9c5449c0ef4 | [
"MIT"
] | null | null | null | v1/addresses/serializers.py | DucPhamTV/MaiTet | 44a1465a3239808f6640592ba666d9c5449c0ef4 | [
"MIT"
] | 15 | 2021-02-20T12:03:33.000Z | 2021-07-26T10:15:03.000Z | v1/addresses/serializers.py | DucPhamTV/MaiTet | 44a1465a3239808f6640592ba666d9c5449c0ef4 | [
"MIT"
] | null | null | null | from rest_framework.serializers import ModelSerializer, StringRelatedField
from v1.addresses.models.addresses import Address
class AddressSerializer(ModelSerializer):
province = StringRelatedField()
district = StringRelatedField()
ward = StringRelatedField()
class Meta:
model = Address
fields = '__all__'
| 24.428571 | 74 | 0.748538 | from rest_framework.serializers import ModelSerializer, StringRelatedField
from v1.addresses.models.addresses import Address
class AddressSerializer(ModelSerializer):
province = StringRelatedField()
district = StringRelatedField()
ward = StringRelatedField()
class Meta:
model = Address
fields = '__all__'
| true | true |
f73e5859f8b430977d4fa772a35d8d6d957f8e82 | 289 | py | Python | scripts/copy_to_staging_area.py | thurn/dungeonstrike | c9eb8867a7c9d5f6702d3fc743cc39d3bfc4a52d | [
"Apache-2.0"
] | 1 | 2017-10-07T14:40:12.000Z | 2017-10-07T14:40:12.000Z | scripts/copy_to_staging_area.py | thurn/dungeonstrike | c9eb8867a7c9d5f6702d3fc743cc39d3bfc4a52d | [
"Apache-2.0"
] | null | null | null | scripts/copy_to_staging_area.py | thurn/dungeonstrike | c9eb8867a7c9d5f6702d3fc743cc39d3bfc4a52d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2.7
import os
import lib
env = lib.init()
print("\nCopying all project files to staging directory...\n")
lib.call([
"rsync", "--archive", "--delete", "--quiet",
env.project_root + os.sep, # Need trailing / to make rsync not create a subdir
env.staging_path
])
| 22.230769 | 80 | 0.67474 |
import os
import lib
env = lib.init()
print("\nCopying all project files to staging directory...\n")
lib.call([
"rsync", "--archive", "--delete", "--quiet",
env.project_root + os.sep,
env.staging_path
])
| true | true |
f73e5bfa9700cd4b4cdfe40b8976cb6923234da6 | 4,234 | py | Python | simplejson/tests/test_unicode.py | gitdaniel228/realtor | 4366d57b064be87b31c8a036b3ed7a99b2036461 | [
"BSD-3-Clause"
] | 20 | 2015-01-26T01:39:44.000Z | 2020-05-30T19:04:14.000Z | lib/simplejson/tests/test_unicode.py | motord/Motorcycle-Diaries | bb5e5e2d4d79573b4231e760d7662db26c03a55e | [
"BSD-3-Clause"
] | 6 | 2015-02-23T06:47:09.000Z | 2015-06-04T20:31:30.000Z | lib/simplejson/tests/test_unicode.py | motord/Motorcycle-Diaries | bb5e5e2d4d79573b4231e760d7662db26c03a55e | [
"BSD-3-Clause"
] | 13 | 2015-01-26T01:39:45.000Z | 2022-03-09T16:45:09.000Z | from unittest import TestCase
import simplejson as json
class TestUnicode(TestCase):
def test_encoding1(self):
encoder = json.JSONEncoder(encoding='utf-8')
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = encoder.encode(u)
js = encoder.encode(s)
self.assertEquals(ju, js)
def test_encoding2(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = json.dumps(u, encoding='utf-8')
js = json.dumps(s, encoding='utf-8')
self.assertEquals(ju, js)
def test_encoding3(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u)
self.assertEquals(j, '"\\u03b1\\u03a9"')
def test_encoding4(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u])
self.assertEquals(j, '["\\u03b1\\u03a9"]')
def test_encoding5(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u, ensure_ascii=False)
self.assertEquals(j, u'"' + u + u'"')
def test_encoding6(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u], ensure_ascii=False)
self.assertEquals(j, u'["' + u + u'"]')
def test_big_unicode_encode(self):
u = u'\U0001d120'
self.assertEquals(json.dumps(u), '"\\ud834\\udd20"')
self.assertEquals(json.dumps(u, ensure_ascii=False), u'"\U0001d120"')
def test_big_unicode_decode(self):
u = u'z\U0001d120x'
self.assertEquals(json.loads('"' + u + '"'), u)
self.assertEquals(json.loads('"z\\ud834\\udd20x"'), u)
def test_unicode_decode(self):
for i in range(0, 0xd7ff):
u = unichr(i)
#s = '"\\u{0:04x}"'.format(i)
s = '"\\u%04x"' % (i,)
self.assertEquals(json.loads(s), u)
def test_object_pairs_hook_with_unicode(self):
s = u'{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
p = [(u"xkd", 1), (u"kcw", 2), (u"art", 3), (u"hxm", 4),
(u"qrt", 5), (u"pad", 6), (u"hoy", 7)]
self.assertEqual(json.loads(s), eval(s))
self.assertEqual(json.loads(s, object_pairs_hook=lambda x: x), p)
od = json.loads(s, object_pairs_hook=json.OrderedDict)
self.assertEqual(od, json.OrderedDict(p))
self.assertEqual(type(od), json.OrderedDict)
# the object_pairs_hook takes priority over the object_hook
self.assertEqual(json.loads(s,
object_pairs_hook=json.OrderedDict,
object_hook=lambda x: None),
json.OrderedDict(p))
def test_default_encoding(self):
self.assertEquals(json.loads(u'{"a": "\xe9"}'.encode('utf-8')),
{'a': u'\xe9'})
def test_unicode_preservation(self):
self.assertEquals(type(json.loads(u'""')), unicode)
self.assertEquals(type(json.loads(u'"a"')), unicode)
self.assertEquals(type(json.loads(u'["a"]')[0]), unicode)
def test_ensure_ascii_false_returns_unicode(self):
# http://code.google.com/p/simplejson/issues/detail?id=48
self.assertEquals(type(json.dumps([], ensure_ascii=False)), unicode)
self.assertEquals(type(json.dumps(0, ensure_ascii=False)), unicode)
self.assertEquals(type(json.dumps({}, ensure_ascii=False)), unicode)
self.assertEquals(type(json.dumps("", ensure_ascii=False)), unicode)
def test_ensure_ascii_false_bytestring_encoding(self):
# http://code.google.com/p/simplejson/issues/detail?id=48
doc1 = {u'quux': 'Arr\xc3\xaat sur images'}
doc2 = {u'quux': u'Arr\xeat sur images'}
doc_ascii = '{"quux": "Arr\\u00eat sur images"}'
doc_unicode = u'{"quux": "Arr\xeat sur images"}'
self.assertEquals(json.dumps(doc1), doc_ascii)
self.assertEquals(json.dumps(doc2), doc_ascii)
self.assertEquals(json.dumps(doc1, ensure_ascii=False), doc_unicode)
self.assertEquals(json.dumps(doc2, ensure_ascii=False), doc_unicode)
| 42.34 | 78 | 0.600614 | from unittest import TestCase
import simplejson as json
class TestUnicode(TestCase):
def test_encoding1(self):
encoder = json.JSONEncoder(encoding='utf-8')
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = encoder.encode(u)
js = encoder.encode(s)
self.assertEquals(ju, js)
def test_encoding2(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = json.dumps(u, encoding='utf-8')
js = json.dumps(s, encoding='utf-8')
self.assertEquals(ju, js)
def test_encoding3(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u)
self.assertEquals(j, '"\\u03b1\\u03a9"')
def test_encoding4(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u])
self.assertEquals(j, '["\\u03b1\\u03a9"]')
def test_encoding5(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u, ensure_ascii=False)
self.assertEquals(j, u'"' + u + u'"')
def test_encoding6(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u], ensure_ascii=False)
self.assertEquals(j, u'["' + u + u'"]')
def test_big_unicode_encode(self):
u = u'\U0001d120'
self.assertEquals(json.dumps(u), '"\\ud834\\udd20"')
self.assertEquals(json.dumps(u, ensure_ascii=False), u'"\U0001d120"')
def test_big_unicode_decode(self):
u = u'z\U0001d120x'
self.assertEquals(json.loads('"' + u + '"'), u)
self.assertEquals(json.loads('"z\\ud834\\udd20x"'), u)
def test_unicode_decode(self):
for i in range(0, 0xd7ff):
u = unichr(i)
s = '"\\u%04x"' % (i,)
self.assertEquals(json.loads(s), u)
def test_object_pairs_hook_with_unicode(self):
s = u'{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
p = [(u"xkd", 1), (u"kcw", 2), (u"art", 3), (u"hxm", 4),
(u"qrt", 5), (u"pad", 6), (u"hoy", 7)]
self.assertEqual(json.loads(s), eval(s))
self.assertEqual(json.loads(s, object_pairs_hook=lambda x: x), p)
od = json.loads(s, object_pairs_hook=json.OrderedDict)
self.assertEqual(od, json.OrderedDict(p))
self.assertEqual(type(od), json.OrderedDict)
self.assertEqual(json.loads(s,
object_pairs_hook=json.OrderedDict,
object_hook=lambda x: None),
json.OrderedDict(p))
def test_default_encoding(self):
self.assertEquals(json.loads(u'{"a": "\xe9"}'.encode('utf-8')),
{'a': u'\xe9'})
def test_unicode_preservation(self):
self.assertEquals(type(json.loads(u'""')), unicode)
self.assertEquals(type(json.loads(u'"a"')), unicode)
self.assertEquals(type(json.loads(u'["a"]')[0]), unicode)
def test_ensure_ascii_false_returns_unicode(self):
self.assertEquals(type(json.dumps([], ensure_ascii=False)), unicode)
self.assertEquals(type(json.dumps(0, ensure_ascii=False)), unicode)
self.assertEquals(type(json.dumps({}, ensure_ascii=False)), unicode)
self.assertEquals(type(json.dumps("", ensure_ascii=False)), unicode)
def test_ensure_ascii_false_bytestring_encoding(self):
doc1 = {u'quux': 'Arr\xc3\xaat sur images'}
doc2 = {u'quux': u'Arr\xeat sur images'}
doc_ascii = '{"quux": "Arr\\u00eat sur images"}'
doc_unicode = u'{"quux": "Arr\xeat sur images"}'
self.assertEquals(json.dumps(doc1), doc_ascii)
self.assertEquals(json.dumps(doc2), doc_ascii)
self.assertEquals(json.dumps(doc1, ensure_ascii=False), doc_unicode)
self.assertEquals(json.dumps(doc2, ensure_ascii=False), doc_unicode)
| true | true |
f73e5edb4e824e95d29c00b69907e6c73bbcc304 | 1,168 | py | Python | tests/unit/pypyr/steps/call_test.py | mofm/pypyr | f417f69ba9a607d8a93019854105cfbc4dc9c36d | [
"Apache-2.0"
] | 261 | 2020-08-18T19:31:29.000Z | 2022-03-31T14:54:06.000Z | tests/unit/pypyr/steps/call_test.py | mofm/pypyr | f417f69ba9a607d8a93019854105cfbc4dc9c36d | [
"Apache-2.0"
] | 89 | 2017-04-12T09:50:32.000Z | 2020-08-13T13:18:36.000Z | tests/unit/pypyr/steps/call_test.py | mofm/pypyr | f417f69ba9a607d8a93019854105cfbc4dc9c36d | [
"Apache-2.0"
] | 15 | 2020-09-30T12:15:50.000Z | 2022-03-30T07:25:40.000Z | """call.py unit tests."""
import logging
import pytest
from pypyr.context import Context
from pypyr.errors import Call
from pypyr.steps.call import run_step
from tests.common.utils import patch_logger
def test_call_step_dict_with_all_args():
"""Dict with all values set."""
with pytest.raises(Call) as err:
with patch_logger('pypyr.steps.call',
logging.INFO) as mock_logger_info:
run_step(Context(Context({'call': {'groups': ['b', 'c'],
'success': 'sg',
'failure': 'fg'}})))
cof = err.value
assert isinstance(cof, Call)
assert cof.groups == ['b', 'c']
assert cof.success_group == 'sg'
assert cof.failure_group == 'fg'
assert cof.original_config == ('call', {'groups': ['b', 'c'],
'success': 'sg',
'failure': 'fg'})
mock_logger_info.assert_called_once_with(
"step pypyr.steps.call about to hand over control with call: "
"Will run groups: ['b', 'c'] with success sg and failure fg")
| 36.5 | 70 | 0.544521 | import logging
import pytest
from pypyr.context import Context
from pypyr.errors import Call
from pypyr.steps.call import run_step
from tests.common.utils import patch_logger
def test_call_step_dict_with_all_args():
with pytest.raises(Call) as err:
with patch_logger('pypyr.steps.call',
logging.INFO) as mock_logger_info:
run_step(Context(Context({'call': {'groups': ['b', 'c'],
'success': 'sg',
'failure': 'fg'}})))
cof = err.value
assert isinstance(cof, Call)
assert cof.groups == ['b', 'c']
assert cof.success_group == 'sg'
assert cof.failure_group == 'fg'
assert cof.original_config == ('call', {'groups': ['b', 'c'],
'success': 'sg',
'failure': 'fg'})
mock_logger_info.assert_called_once_with(
"step pypyr.steps.call about to hand over control with call: "
"Will run groups: ['b', 'c'] with success sg and failure fg")
| true | true |
f73e5ff5e3d85e3d82016f1a921201826b0a8508 | 502 | py | Python | app/asx/wsgi.py | shift37/asx_gym | dd3d8dafae4f22ab9c9027bf362013255dbc6c36 | [
"RSA-MD"
] | null | null | null | app/asx/wsgi.py | shift37/asx_gym | dd3d8dafae4f22ab9c9027bf362013255dbc6c36 | [
"RSA-MD"
] | 3 | 2020-06-06T08:27:08.000Z | 2020-06-13T09:51:26.000Z | app/asx/wsgi.py | asxgym/asx_gym | 8b7745820c0d4cd59281acf7c003ec1f1938005a | [
"RSA-MD"
] | null | null | null | """
WSGI config for asx_data project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
import sys
sys.path.append('/var/www/axs_data/')
sys.path.append('/anaconda3/lib/python3.7/site-packages/')
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'asx_data.settings')
application = get_wsgi_application()
| 23.904762 | 78 | 0.776892 |
import os
import sys
sys.path.append('/var/www/axs_data/')
sys.path.append('/anaconda3/lib/python3.7/site-packages/')
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'asx_data.settings')
application = get_wsgi_application()
| true | true |
f73e619be549cba3f38e356780e47eff6907d447 | 435 | py | Python | VideoSensors/Sensors/CVSensor.py | CommName/WildeLifeWatcher | 3ce3b564d0e6cc81ebc2b607a712580d3c388db6 | [
"MIT"
] | null | null | null | VideoSensors/Sensors/CVSensor.py | CommName/WildeLifeWatcher | 3ce3b564d0e6cc81ebc2b607a712580d3c388db6 | [
"MIT"
] | null | null | null | VideoSensors/Sensors/CVSensor.py | CommName/WildeLifeWatcher | 3ce3b564d0e6cc81ebc2b607a712580d3c388db6 | [
"MIT"
] | null | null | null | from Sensors import Sensor
import cv2
class CVSensor(Sensor):
stream = None
def __init__(self, videoSource):
self.stream = cv2.VideoCapture(videoSource)
def __del__(self):
self.stream.release()
def getFrame(self):
if not self.stream.isOpened():
return None
ret, frame = self.stream.read()
if ret:
return frame
else:
return None
| 18.125 | 51 | 0.583908 | from Sensors import Sensor
import cv2
class CVSensor(Sensor):
stream = None
def __init__(self, videoSource):
self.stream = cv2.VideoCapture(videoSource)
def __del__(self):
self.stream.release()
def getFrame(self):
if not self.stream.isOpened():
return None
ret, frame = self.stream.read()
if ret:
return frame
else:
return None
| true | true |
f73e61a9003c699d60ac6c1c1b8d8be815a2c80e | 1,251 | py | Python | AppServer/lib/django-1.4/tests/regressiontests/queryset_pickle/tests.py | loftwah/appscale | 586fc1347ebc743d7a632de698f4dbfb09ae38d6 | [
"Apache-2.0"
] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | AppServer/lib/django-1.4/tests/regressiontests/queryset_pickle/tests.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | AppServer/lib/django-1.4/tests/regressiontests/queryset_pickle/tests.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z | from __future__ import absolute_import
import pickle
import datetime
from django.test import TestCase
from .models import Group, Event, Happening
class PickleabilityTestCase(TestCase):
def assert_pickles(self, qs):
self.assertEqual(list(pickle.loads(pickle.dumps(qs))), list(qs))
def test_related_field(self):
g = Group.objects.create(name="Ponies Who Own Maybachs")
self.assert_pickles(Event.objects.filter(group=g.id))
def test_datetime_callable_default_all(self):
self.assert_pickles(Happening.objects.all())
def test_datetime_callable_default_filter(self):
self.assert_pickles(Happening.objects.filter(when=datetime.datetime.now()))
def test_lambda_as_default(self):
self.assert_pickles(Happening.objects.filter(name="test"))
def test_standalone_method_as_default(self):
self.assert_pickles(Happening.objects.filter(number1=1))
def test_staticmethod_as_default(self):
self.assert_pickles(Happening.objects.filter(number2=1))
def test_classmethod_as_default(self):
self.assert_pickles(Happening.objects.filter(number3=1))
def test_membermethod_as_default(self):
self.assert_pickles(Happening.objects.filter(number4=1))
| 32.076923 | 83 | 0.751399 | from __future__ import absolute_import
import pickle
import datetime
from django.test import TestCase
from .models import Group, Event, Happening
class PickleabilityTestCase(TestCase):
def assert_pickles(self, qs):
self.assertEqual(list(pickle.loads(pickle.dumps(qs))), list(qs))
def test_related_field(self):
g = Group.objects.create(name="Ponies Who Own Maybachs")
self.assert_pickles(Event.objects.filter(group=g.id))
def test_datetime_callable_default_all(self):
self.assert_pickles(Happening.objects.all())
def test_datetime_callable_default_filter(self):
self.assert_pickles(Happening.objects.filter(when=datetime.datetime.now()))
def test_lambda_as_default(self):
self.assert_pickles(Happening.objects.filter(name="test"))
def test_standalone_method_as_default(self):
self.assert_pickles(Happening.objects.filter(number1=1))
def test_staticmethod_as_default(self):
self.assert_pickles(Happening.objects.filter(number2=1))
def test_classmethod_as_default(self):
self.assert_pickles(Happening.objects.filter(number3=1))
def test_membermethod_as_default(self):
self.assert_pickles(Happening.objects.filter(number4=1))
| true | true |
f73e62a85d454f9033eb749cd08590dcae9afa98 | 3,087 | py | Python | iimmpact/models/topup_response.py | iimmpact/iimmpact_python_api | d1de12df3eb984ebc7f6adef800d7cb0265e173e | [
"MIT"
] | null | null | null | iimmpact/models/topup_response.py | iimmpact/iimmpact_python_api | d1de12df3eb984ebc7f6adef800d7cb0265e173e | [
"MIT"
] | null | null | null | iimmpact/models/topup_response.py | iimmpact/iimmpact_python_api | d1de12df3eb984ebc7f6adef800d7cb0265e173e | [
"MIT"
] | null | null | null | # coding: utf-8
"""
IIMMPACT API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2020-09-14T13:01:14Z
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class TopupResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'data': 'TopupResponseData'
}
attribute_map = {
'data': 'data'
}
def __init__(self, data=None): # noqa: E501
"""TopupResponse - a model defined in Swagger""" # noqa: E501
self._data = None
self.discriminator = None
if data is not None:
self.data = data
@property
def data(self):
"""Gets the data of this TopupResponse. # noqa: E501
:return: The data of this TopupResponse. # noqa: E501
:rtype: TopupResponseData
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this TopupResponse.
:param data: The data of this TopupResponse. # noqa: E501
:type: TopupResponseData
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TopupResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TopupResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.612069 | 119 | 0.553936 |
import pprint
import re
import six
class TopupResponse(object):
swagger_types = {
'data': 'TopupResponseData'
}
attribute_map = {
'data': 'data'
}
def __init__(self, data=None):
self._data = None
self.discriminator = None
if data is not None:
self.data = data
@property
def data(self):
return self._data
@data.setter
def data(self, data):
self._data = data
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TopupResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, TopupResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f73e63e237ea90b3e82dac4e266100933370f346 | 2,516 | py | Python | examples/run_mtsv1.py | tuahk/NiaPy | c863d801fda8e1949a3ca716a4de7c7ca3d0ea16 | [
"MIT"
] | null | null | null | examples/run_mtsv1.py | tuahk/NiaPy | c863d801fda8e1949a3ca716a4de7c7ca3d0ea16 | [
"MIT"
] | null | null | null | examples/run_mtsv1.py | tuahk/NiaPy | c863d801fda8e1949a3ca716a4de7c7ca3d0ea16 | [
"MIT"
] | null | null | null | # encoding=utf8
# This is temporary fix to import module from parent folder
# It will be removed when package is published on PyPI
import sys
sys.path.append('../')
# End of fix
import random
import logging
from NiaPy.algorithms.other import MultipleTrajectorySearchV1
from NiaPy.benchmarks.utility import TaskConvPrint, TaskConvPlot, OptimizationType
from margparser import getDictArgs
logging.basicConfig()
logger = logging.getLogger('examples')
logger.setLevel('INFO')
# For reproducive results
class MinMB(object):
def __init__(self):
self.Lower = -11
self.Upper = 11
def function(self):
def evaluate(D, sol):
val = 0.0
for i in range(D): val = val + sol[i] * sol[i]
return val
return evaluate
class MaxMB(MinMB):
def function(self):
f = MinMB.function(self)
def e(D, sol): return -f(D, sol)
return e
def simple_example(runs=10, D=10, nFES=50000, nGEN=10000, seed=None, optType=OptimizationType.MINIMIZATION, optFunc=MinMB, **kn):
for i in range(runs):
algo = MultipleTrajectorySearchV1(D=D, nFES=nFES, nGEN=nGEN, n=15, C_a=1, C_r=0.5, optType=optType, benchmark=optFunc())
best = algo.run()
logger.info('%s %s' % (best[0], best[1]))
def logging_example(D=10, nFES=50000, nGEN=100000, seed=None, optType=OptimizationType.MINIMIZATION, optFunc=MinMB, **kn):
task = TaskConvPrint(D=D, nFES=nFES, nGEN=nGEN, optType=optType, benchmark=optFunc())
algo = MultipleTrajectorySearchV1(task=task, n=15, C_a=1, C_r=0.5)
best = algo.run()
logger.info('%s %s' % (best[0], best[1]))
def plot_example(D=10, nFES=50000, nGEN=100000, seed=None, optType=OptimizationType.MINIMIZATION, optFunc=MinMB, **kn):
task = TaskConvPlot(D=D, nFES=nFES, nGEN=nGEN, optType=optType, benchmark=optFunc())
algo = MultipleTrajectorySearchV1(task=task, n=15, C_a=1, C_r=0.5)
best = algo.run()
logger.info('%s %s' % (best[0], best[1]))
input('Press [enter] to continue')
def getOptType(strtype):
if strtype == 'min': return OptimizationType.MINIMIZATION, MinMB
elif strtype == 'max': return OptimizationType.MAXIMIZATION, MaxMB
else: return None
if __name__ == '__main__':
pargs = getDictArgs(sys.argv[1:])
optType, optFunc = getOptType(pargs.pop('optType', 'min'))
if not pargs['runType']: simple_example(optType=optType, optFunc=optFunc, **pargs)
elif pargs['runType'] == 'log': logging_example(optType=optType, optFunc=optFunc, **pargs)
elif pargs['runType'] == 'plot': plot_example(optType=optType, optFunc=optFunc, **pargs)
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| 36.463768 | 129 | 0.726948 |
import sys
sys.path.append('../')
import random
import logging
from NiaPy.algorithms.other import MultipleTrajectorySearchV1
from NiaPy.benchmarks.utility import TaskConvPrint, TaskConvPlot, OptimizationType
from margparser import getDictArgs
logging.basicConfig()
logger = logging.getLogger('examples')
logger.setLevel('INFO')
class MinMB(object):
def __init__(self):
self.Lower = -11
self.Upper = 11
def function(self):
def evaluate(D, sol):
val = 0.0
for i in range(D): val = val + sol[i] * sol[i]
return val
return evaluate
class MaxMB(MinMB):
def function(self):
f = MinMB.function(self)
def e(D, sol): return -f(D, sol)
return e
def simple_example(runs=10, D=10, nFES=50000, nGEN=10000, seed=None, optType=OptimizationType.MINIMIZATION, optFunc=MinMB, **kn):
for i in range(runs):
algo = MultipleTrajectorySearchV1(D=D, nFES=nFES, nGEN=nGEN, n=15, C_a=1, C_r=0.5, optType=optType, benchmark=optFunc())
best = algo.run()
logger.info('%s %s' % (best[0], best[1]))
def logging_example(D=10, nFES=50000, nGEN=100000, seed=None, optType=OptimizationType.MINIMIZATION, optFunc=MinMB, **kn):
task = TaskConvPrint(D=D, nFES=nFES, nGEN=nGEN, optType=optType, benchmark=optFunc())
algo = MultipleTrajectorySearchV1(task=task, n=15, C_a=1, C_r=0.5)
best = algo.run()
logger.info('%s %s' % (best[0], best[1]))
def plot_example(D=10, nFES=50000, nGEN=100000, seed=None, optType=OptimizationType.MINIMIZATION, optFunc=MinMB, **kn):
task = TaskConvPlot(D=D, nFES=nFES, nGEN=nGEN, optType=optType, benchmark=optFunc())
algo = MultipleTrajectorySearchV1(task=task, n=15, C_a=1, C_r=0.5)
best = algo.run()
logger.info('%s %s' % (best[0], best[1]))
input('Press [enter] to continue')
def getOptType(strtype):
if strtype == 'min': return OptimizationType.MINIMIZATION, MinMB
elif strtype == 'max': return OptimizationType.MAXIMIZATION, MaxMB
else: return None
if __name__ == '__main__':
pargs = getDictArgs(sys.argv[1:])
optType, optFunc = getOptType(pargs.pop('optType', 'min'))
if not pargs['runType']: simple_example(optType=optType, optFunc=optFunc, **pargs)
elif pargs['runType'] == 'log': logging_example(optType=optType, optFunc=optFunc, **pargs)
elif pargs['runType'] == 'plot': plot_example(optType=optType, optFunc=optFunc, **pargs)
| true | true |
f73e6598db4695658b8907adb5d1a227b4ac9667 | 12,586 | py | Python | src/tests/test_e2e.py | vdaas/vald-client-python | 8727351dc1bef18e0bf0e5a7fd26a33d668f36e9 | [
"Apache-2.0"
] | 7 | 2021-04-08T00:19:23.000Z | 2021-10-11T06:19:02.000Z | src/tests/test_e2e.py | vdaas/vald-client-python | 8727351dc1bef18e0bf0e5a7fd26a33d668f36e9 | [
"Apache-2.0"
] | 8 | 2020-01-28T10:10:37.000Z | 2021-08-24T08:05:08.000Z | src/tests/test_e2e.py | vdaas/vald-client-python | 8727351dc1bef18e0bf0e5a7fd26a33d668f36e9 | [
"Apache-2.0"
] | null | null | null | import unittest
import json
import time
import grpc
from vald.v1.agent.core import agent_pb2_grpc
from vald.v1.vald import insert_pb2_grpc
from vald.v1.vald import search_pb2_grpc
from vald.v1.vald import update_pb2_grpc
from vald.v1.vald import upsert_pb2_grpc
from vald.v1.vald import remove_pb2_grpc
from vald.v1.vald import object_pb2_grpc
from vald.v1.payload import payload_pb2
class TestValdE2E(unittest.TestCase):
    """End-to-end tests for the vald-client-python gRPC bindings.

    Requires a Vald gateway/agent listening on ``localhost:8081`` (see
    ``setUp``) and the ``wordvecs1000.json`` fixture file in the current
    working directory (loaded in ``__init__``).

    NOTE(review): the insert tests appear to be expected to run before the
    search/exists tests, but ``unittest`` executes methods in alphabetical
    order (e.g. ``test_create_index`` sorts before ``test_insert``) — confirm
    the intended execution order against the CI harness.
    """
def __init__(self, *args, **kwargs):
super(TestValdE2E, self).__init__(*args, **kwargs)
self.data = json.load(open("wordvecs1000.json", "r"))
def setUp(self):
options = [("grpc.keepalive_time_ms", 10000),
("grpc.keepalive_timeout_ms", 5000),
("grpc.client_channel_backup_poll_interval_ms", 100)]
self.channel = grpc.insecure_channel(
target="localhost:8081", options=options)
    def tearDown(self):
        """Close the gRPC channel opened in setUp after each test."""
        self.channel.close()
def test_insert(self):
stub = insert_pb2_grpc.InsertStub(self.channel)
vec = payload_pb2.Object.Vector(
id=self.data[0]["id"], vector=self.data[0]["vector"])
cfg = payload_pb2.Insert.Config(skip_strict_exist_check=True)
result = stub.Insert(
payload_pb2.Insert.Request(vector=vec, config=cfg))
self.assertIsInstance(result, payload_pb2.Object.Location)
def test_multi_insert(self):
stub = insert_pb2_grpc.InsertStub(self.channel)
cfg = payload_pb2.Insert.Config(skip_strict_exist_check=True)
requests = []
for i in range(1, 10):
vec = payload_pb2.Object.Vector(
id=self.data[i]["id"], vector=self.data[i]["vector"])
requests.append(payload_pb2.Insert.Request(
vector=vec, config=cfg))
results = stub.MultiInsert(
payload_pb2.Insert.MultiRequest(requests=requests))
self.assertIsInstance(results, payload_pb2.Object.Locations)
def test_stream_insert(self):
stub = insert_pb2_grpc.InsertStub(self.channel)
cfg = payload_pb2.Insert.Config(skip_strict_exist_check=True)
requests = []
for i in range(11, 100):
vec = payload_pb2.Object.Vector(
id=self.data[i]["id"], vector=self.data[i]["vector"])
requests.append(payload_pb2.Insert.Request(
vector=vec, config=cfg))
results = stub.StreamInsert(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Object.StreamLocation)
self.assertEqual(result.status.code, 0)
def test_create_index(self):
stub = agent_pb2_grpc.AgentStub(self.channel)
result = stub.CreateIndex(
payload_pb2.Control.CreateIndexRequest(pool_size=10000))
self.assertIsInstance(result, payload_pb2.Empty)
def test_save_index(self):
stub = agent_pb2_grpc.AgentStub(self.channel)
result = stub.SaveIndex(payload_pb2.Empty())
self.assertIsInstance(result, payload_pb2.Empty)
def test_index_info(self):
stub = agent_pb2_grpc.AgentStub(self.channel)
result = stub.IndexInfo(payload_pb2.Empty())
self.assertIsInstance(result, payload_pb2.Info.Index.Count)
self.assertEqual(result.stored, 99)
self.assertEqual(result.uncommitted, 0)
def test_exists(self):
stub = object_pb2_grpc.ObjectStub(self.channel)
result = stub.Exists(
payload_pb2.Object.ID(id=self.data[0]["id"]))
self.assertIsInstance(result, payload_pb2.Object.ID)
def test_get_object(self):
stub = object_pb2_grpc.ObjectStub(self.channel)
req = payload_pb2.Object.VectorRequest(
id=payload_pb2.Object.ID(id=self.data[0]["id"]))
result = stub.GetObject(req)
self.assertIsInstance(result, payload_pb2.Object.Vector)
self.assertEqual(result.id, self.data[0]["id"])
def test_stream_get_object(self):
stub = object_pb2_grpc.ObjectStub(self.channel)
requests = []
for i in range(0, 10):
requests.append(payload_pb2.Object.VectorRequest(
id=payload_pb2.Object.ID(id=self.data[i]["id"])))
results = stub.StreamGetObject(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Object.StreamVector)
def test_search(self):
stub = search_pb2_grpc.SearchStub(self.channel)
cfg = payload_pb2.Search.Config(
num=3, radius=-1.0, epsilon=0.1, timeout=3000000000)
result = stub.Search(payload_pb2.Search.Request(
vector=self.data[0]["vector"], config=cfg))
self.assertIsInstance(result, payload_pb2.Search.Response)
self.assertEqual(len(result.results), 3)
def test_multi_search(self):
stub = search_pb2_grpc.SearchStub(self.channel)
cfg = payload_pb2.Search.Config(
num=3, radius=-1.0, epsilon=0.1, timeout=3000000000)
requests = []
for i in range(1, 10):
requests.append(payload_pb2.Search.Request(
vector=self.data[i]["vector"], config=cfg))
results = stub.MultiSearch(
payload_pb2.Search.MultiRequest(requests=requests))
self.assertIsInstance(results, payload_pb2.Search.Responses)
for response in results.responses:
self.assertIsInstance(response, payload_pb2.Search.Response)
self.assertEqual(len(response.results), 3)
def test_stream_search(self):
stub = search_pb2_grpc.SearchStub(self.channel)
cfg = payload_pb2.Search.Config(
num=3, radius=-1.0, epsilon=0.1, timeout=3000000000)
requests = []
for i in range(11, 20):
requests.append(payload_pb2.Search.Request(
vector=self.data[i]["vector"], config=cfg))
results = stub.StreamSearch(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Search.StreamResponse)
self.assertIsNotNone(result.response)
def test_search_id(self):
stub = search_pb2_grpc.SearchStub(self.channel)
cfg = payload_pb2.Search.Config(
num=3, radius=-1.0, epsilon=0.1, timeout=3000000000)
result = stub.SearchByID(payload_pb2.Search.IDRequest(
id=self.data[0]["id"], config=cfg))
self.assertIsInstance(result, payload_pb2.Search.Response)
self.assertEqual(len(result.results), 3)
def test_multi_search_id(self):
stub = search_pb2_grpc.SearchStub(self.channel)
cfg = payload_pb2.Search.Config(
num=3, radius=-1.0, epsilon=0.1, timeout=3000000000)
requests = []
for i in range(1, 10):
requests.append(payload_pb2.Search.IDRequest(
id=self.data[i]["id"], config=cfg))
results = stub.MultiSearchByID(
payload_pb2.Search.MultiIDRequest(requests=requests))
self.assertIsInstance(results, payload_pb2.Search.Responses)
for response in results.responses:
self.assertIsInstance(response, payload_pb2.Search.Response)
self.assertEqual(len(response.results), 3)
def test_stream_search_id(self):
stub = search_pb2_grpc.SearchStub(self.channel)
cfg = payload_pb2.Search.Config(
num=3, radius=-1.0, epsilon=0.1, timeout=3000000000)
requests = []
for i in range(11, 20):
requests.append(payload_pb2.Search.IDRequest(
id=self.data[i]["id"], config=cfg))
results = stub.StreamSearchByID(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Search.StreamResponse)
self.assertIsNotNone(result.response)
def test_update(self):
stub = update_pb2_grpc.UpdateStub(self.channel)
vec = payload_pb2.Object.Vector(
id=self.data[0]["id"], vector=self.data[1]["vector"])
cfg = payload_pb2.Update.Config(skip_strict_exist_check=True)
result = stub.Update(
payload_pb2.Update.Request(vector=vec, config=cfg))
self.assertIsInstance(result, payload_pb2.Object.Location)
def test_multi_update(self):
stub = update_pb2_grpc.UpdateStub(self.channel)
cfg = payload_pb2.Update.Config(skip_strict_exist_check=True)
requests = []
for i in range(1, 10):
vec = payload_pb2.Object.Vector(
id=self.data[i]["id"], vector=self.data[i+1]["vector"])
requests.append(payload_pb2.Update.Request(
vector=vec, config=cfg))
results = stub.MultiUpdate(
payload_pb2.Update.MultiRequest(requests=requests))
self.assertIsInstance(results, payload_pb2.Object.Locations)
def test_stream_update(self):
stub = update_pb2_grpc.UpdateStub(self.channel)
cfg = payload_pb2.Update.Config(skip_strict_exist_check=True)
requests = []
for i in range(11, 20):
vec = payload_pb2.Object.Vector(
id=self.data[i]["id"], vector=self.data[i+1]["vector"])
requests.append(payload_pb2.Update.Request(
vector=vec, config=cfg))
results = stub.StreamUpdate(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Object.StreamLocation)
self.assertEqual(result.status.code, 0)
def test_upsert(self):
stub = upsert_pb2_grpc.UpsertStub(self.channel)
vec = payload_pb2.Object.Vector(
id=self.data[0]["id"], vector=self.data[0]["vector"])
cfg = payload_pb2.Upsert.Config(skip_strict_exist_check=True)
result = stub.Upsert(
payload_pb2.Upsert.Request(vector=vec, config=cfg))
self.assertIsInstance(result, payload_pb2.Object.Location)
def test_multi_upsert(self):
stub = upsert_pb2_grpc.UpsertStub(self.channel)
cfg = payload_pb2.Upsert.Config(skip_strict_exist_check=True)
requests = []
for i in range(1, 10):
vec = payload_pb2.Object.Vector(
id=self.data[i]["id"], vector=self.data[i]["vector"])
requests.append(payload_pb2.Upsert.Request(
vector=vec, config=cfg))
results = stub.MultiUpsert(
payload_pb2.Upsert.MultiRequest(requests=requests))
self.assertIsInstance(results, payload_pb2.Object.Locations)
def test_stream_upsert(self):
stub = upsert_pb2_grpc.UpsertStub(self.channel)
cfg = payload_pb2.Upsert.Config(skip_strict_exist_check=True)
requests = []
for i in range(11, 20):
vec = payload_pb2.Object.Vector(
id=self.data[i]["id"], vector=self.data[i]["vector"])
requests.append(payload_pb2.Upsert.Request(
vector=vec, config=cfg))
results = stub.StreamUpsert(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Object.StreamLocation)
self.assertEqual(result.status.code, 0)
def test_remove(self):
stub = remove_pb2_grpc.RemoveStub(self.channel)
cfg = payload_pb2.Remove.Config(skip_strict_exist_check=True)
result = stub.Remove(
payload_pb2.Remove.Request(
id=payload_pb2.Object.ID(id=self.data[0]["id"]), config=cfg))
self.assertIsInstance(result, payload_pb2.Object.Location)
def test_multi_remove(self):
stub = remove_pb2_grpc.RemoveStub(self.channel)
cfg = payload_pb2.Remove.Config(skip_strict_exist_check=True)
requests = []
for i in range(1, 10):
requests.append(payload_pb2.Remove.Request(
id=payload_pb2.Object.ID(id=self.data[i]["id"]), config=cfg))
results = stub.MultiRemove(
payload_pb2.Remove.MultiRequest(requests=requests))
self.assertIsInstance(results, payload_pb2.Object.Locations)
def test_stream_remove(self):
stub = remove_pb2_grpc.RemoveStub(self.channel)
cfg = payload_pb2.Remove.Config(skip_strict_exist_check=True)
requests = []
for i in range(11, 20):
requests.append(payload_pb2.Remove.Request(
id=payload_pb2.Object.ID(id=self.data[i]["id"]), config=cfg))
results = stub.StreamRemove(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Object.StreamLocation)
self.assertEqual(result.status.code, 0)
| 43.701389 | 77 | 0.650405 | import unittest
import json
import time
import grpc
from vald.v1.agent.core import agent_pb2_grpc
from vald.v1.vald import insert_pb2_grpc
from vald.v1.vald import search_pb2_grpc
from vald.v1.vald import update_pb2_grpc
from vald.v1.vald import upsert_pb2_grpc
from vald.v1.vald import remove_pb2_grpc
from vald.v1.vald import object_pb2_grpc
from vald.v1.payload import payload_pb2
class TestValdE2E(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestValdE2E, self).__init__(*args, **kwargs)
self.data = json.load(open("wordvecs1000.json", "r"))
def setUp(self):
options = [("grpc.keepalive_time_ms", 10000),
("grpc.keepalive_timeout_ms", 5000),
("grpc.client_channel_backup_poll_interval_ms", 100)]
self.channel = grpc.insecure_channel(
target="localhost:8081", options=options)
def tearDown(self):
self.channel.close()
def test_insert(self):
stub = insert_pb2_grpc.InsertStub(self.channel)
vec = payload_pb2.Object.Vector(
id=self.data[0]["id"], vector=self.data[0]["vector"])
cfg = payload_pb2.Insert.Config(skip_strict_exist_check=True)
result = stub.Insert(
payload_pb2.Insert.Request(vector=vec, config=cfg))
self.assertIsInstance(result, payload_pb2.Object.Location)
def test_multi_insert(self):
stub = insert_pb2_grpc.InsertStub(self.channel)
cfg = payload_pb2.Insert.Config(skip_strict_exist_check=True)
requests = []
for i in range(1, 10):
vec = payload_pb2.Object.Vector(
id=self.data[i]["id"], vector=self.data[i]["vector"])
requests.append(payload_pb2.Insert.Request(
vector=vec, config=cfg))
results = stub.MultiInsert(
payload_pb2.Insert.MultiRequest(requests=requests))
self.assertIsInstance(results, payload_pb2.Object.Locations)
def test_stream_insert(self):
stub = insert_pb2_grpc.InsertStub(self.channel)
cfg = payload_pb2.Insert.Config(skip_strict_exist_check=True)
requests = []
for i in range(11, 100):
vec = payload_pb2.Object.Vector(
id=self.data[i]["id"], vector=self.data[i]["vector"])
requests.append(payload_pb2.Insert.Request(
vector=vec, config=cfg))
results = stub.StreamInsert(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Object.StreamLocation)
self.assertEqual(result.status.code, 0)
def test_create_index(self):
stub = agent_pb2_grpc.AgentStub(self.channel)
result = stub.CreateIndex(
payload_pb2.Control.CreateIndexRequest(pool_size=10000))
self.assertIsInstance(result, payload_pb2.Empty)
def test_save_index(self):
stub = agent_pb2_grpc.AgentStub(self.channel)
result = stub.SaveIndex(payload_pb2.Empty())
self.assertIsInstance(result, payload_pb2.Empty)
def test_index_info(self):
stub = agent_pb2_grpc.AgentStub(self.channel)
result = stub.IndexInfo(payload_pb2.Empty())
self.assertIsInstance(result, payload_pb2.Info.Index.Count)
self.assertEqual(result.stored, 99)
self.assertEqual(result.uncommitted, 0)
def test_exists(self):
stub = object_pb2_grpc.ObjectStub(self.channel)
result = stub.Exists(
payload_pb2.Object.ID(id=self.data[0]["id"]))
self.assertIsInstance(result, payload_pb2.Object.ID)
def test_get_object(self):
stub = object_pb2_grpc.ObjectStub(self.channel)
req = payload_pb2.Object.VectorRequest(
id=payload_pb2.Object.ID(id=self.data[0]["id"]))
result = stub.GetObject(req)
self.assertIsInstance(result, payload_pb2.Object.Vector)
self.assertEqual(result.id, self.data[0]["id"])
def test_stream_get_object(self):
stub = object_pb2_grpc.ObjectStub(self.channel)
requests = []
for i in range(0, 10):
requests.append(payload_pb2.Object.VectorRequest(
id=payload_pb2.Object.ID(id=self.data[i]["id"])))
results = stub.StreamGetObject(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Object.StreamVector)
def test_search(self):
stub = search_pb2_grpc.SearchStub(self.channel)
cfg = payload_pb2.Search.Config(
num=3, radius=-1.0, epsilon=0.1, timeout=3000000000)
result = stub.Search(payload_pb2.Search.Request(
vector=self.data[0]["vector"], config=cfg))
self.assertIsInstance(result, payload_pb2.Search.Response)
self.assertEqual(len(result.results), 3)
def test_multi_search(self):
stub = search_pb2_grpc.SearchStub(self.channel)
cfg = payload_pb2.Search.Config(
num=3, radius=-1.0, epsilon=0.1, timeout=3000000000)
requests = []
for i in range(1, 10):
requests.append(payload_pb2.Search.Request(
vector=self.data[i]["vector"], config=cfg))
results = stub.MultiSearch(
payload_pb2.Search.MultiRequest(requests=requests))
self.assertIsInstance(results, payload_pb2.Search.Responses)
for response in results.responses:
self.assertIsInstance(response, payload_pb2.Search.Response)
self.assertEqual(len(response.results), 3)
def test_stream_search(self):
stub = search_pb2_grpc.SearchStub(self.channel)
cfg = payload_pb2.Search.Config(
num=3, radius=-1.0, epsilon=0.1, timeout=3000000000)
requests = []
for i in range(11, 20):
requests.append(payload_pb2.Search.Request(
vector=self.data[i]["vector"], config=cfg))
results = stub.StreamSearch(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Search.StreamResponse)
self.assertIsNotNone(result.response)
def test_search_id(self):
stub = search_pb2_grpc.SearchStub(self.channel)
cfg = payload_pb2.Search.Config(
num=3, radius=-1.0, epsilon=0.1, timeout=3000000000)
result = stub.SearchByID(payload_pb2.Search.IDRequest(
id=self.data[0]["id"], config=cfg))
self.assertIsInstance(result, payload_pb2.Search.Response)
self.assertEqual(len(result.results), 3)
def test_multi_search_id(self):
stub = search_pb2_grpc.SearchStub(self.channel)
cfg = payload_pb2.Search.Config(
num=3, radius=-1.0, epsilon=0.1, timeout=3000000000)
requests = []
for i in range(1, 10):
requests.append(payload_pb2.Search.IDRequest(
id=self.data[i]["id"], config=cfg))
results = stub.MultiSearchByID(
payload_pb2.Search.MultiIDRequest(requests=requests))
self.assertIsInstance(results, payload_pb2.Search.Responses)
for response in results.responses:
self.assertIsInstance(response, payload_pb2.Search.Response)
self.assertEqual(len(response.results), 3)
def test_stream_search_id(self):
stub = search_pb2_grpc.SearchStub(self.channel)
cfg = payload_pb2.Search.Config(
num=3, radius=-1.0, epsilon=0.1, timeout=3000000000)
requests = []
for i in range(11, 20):
requests.append(payload_pb2.Search.IDRequest(
id=self.data[i]["id"], config=cfg))
results = stub.StreamSearchByID(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Search.StreamResponse)
self.assertIsNotNone(result.response)
def test_update(self):
stub = update_pb2_grpc.UpdateStub(self.channel)
vec = payload_pb2.Object.Vector(
id=self.data[0]["id"], vector=self.data[1]["vector"])
cfg = payload_pb2.Update.Config(skip_strict_exist_check=True)
result = stub.Update(
payload_pb2.Update.Request(vector=vec, config=cfg))
self.assertIsInstance(result, payload_pb2.Object.Location)
def test_multi_update(self):
stub = update_pb2_grpc.UpdateStub(self.channel)
cfg = payload_pb2.Update.Config(skip_strict_exist_check=True)
requests = []
for i in range(1, 10):
vec = payload_pb2.Object.Vector(
id=self.data[i]["id"], vector=self.data[i+1]["vector"])
requests.append(payload_pb2.Update.Request(
vector=vec, config=cfg))
results = stub.MultiUpdate(
payload_pb2.Update.MultiRequest(requests=requests))
self.assertIsInstance(results, payload_pb2.Object.Locations)
def test_stream_update(self):
stub = update_pb2_grpc.UpdateStub(self.channel)
cfg = payload_pb2.Update.Config(skip_strict_exist_check=True)
requests = []
for i in range(11, 20):
vec = payload_pb2.Object.Vector(
id=self.data[i]["id"], vector=self.data[i+1]["vector"])
requests.append(payload_pb2.Update.Request(
vector=vec, config=cfg))
results = stub.StreamUpdate(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Object.StreamLocation)
self.assertEqual(result.status.code, 0)
def test_upsert(self):
stub = upsert_pb2_grpc.UpsertStub(self.channel)
vec = payload_pb2.Object.Vector(
id=self.data[0]["id"], vector=self.data[0]["vector"])
cfg = payload_pb2.Upsert.Config(skip_strict_exist_check=True)
result = stub.Upsert(
payload_pb2.Upsert.Request(vector=vec, config=cfg))
self.assertIsInstance(result, payload_pb2.Object.Location)
def test_multi_upsert(self):
stub = upsert_pb2_grpc.UpsertStub(self.channel)
cfg = payload_pb2.Upsert.Config(skip_strict_exist_check=True)
requests = []
for i in range(1, 10):
vec = payload_pb2.Object.Vector(
id=self.data[i]["id"], vector=self.data[i]["vector"])
requests.append(payload_pb2.Upsert.Request(
vector=vec, config=cfg))
results = stub.MultiUpsert(
payload_pb2.Upsert.MultiRequest(requests=requests))
self.assertIsInstance(results, payload_pb2.Object.Locations)
def test_stream_upsert(self):
stub = upsert_pb2_grpc.UpsertStub(self.channel)
cfg = payload_pb2.Upsert.Config(skip_strict_exist_check=True)
requests = []
for i in range(11, 20):
vec = payload_pb2.Object.Vector(
id=self.data[i]["id"], vector=self.data[i]["vector"])
requests.append(payload_pb2.Upsert.Request(
vector=vec, config=cfg))
results = stub.StreamUpsert(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Object.StreamLocation)
self.assertEqual(result.status.code, 0)
def test_remove(self):
stub = remove_pb2_grpc.RemoveStub(self.channel)
cfg = payload_pb2.Remove.Config(skip_strict_exist_check=True)
result = stub.Remove(
payload_pb2.Remove.Request(
id=payload_pb2.Object.ID(id=self.data[0]["id"]), config=cfg))
self.assertIsInstance(result, payload_pb2.Object.Location)
def test_multi_remove(self):
stub = remove_pb2_grpc.RemoveStub(self.channel)
cfg = payload_pb2.Remove.Config(skip_strict_exist_check=True)
requests = []
for i in range(1, 10):
requests.append(payload_pb2.Remove.Request(
id=payload_pb2.Object.ID(id=self.data[i]["id"]), config=cfg))
results = stub.MultiRemove(
payload_pb2.Remove.MultiRequest(requests=requests))
self.assertIsInstance(results, payload_pb2.Object.Locations)
def test_stream_remove(self):
stub = remove_pb2_grpc.RemoveStub(self.channel)
cfg = payload_pb2.Remove.Config(skip_strict_exist_check=True)
requests = []
for i in range(11, 20):
requests.append(payload_pb2.Remove.Request(
id=payload_pb2.Object.ID(id=self.data[i]["id"]), config=cfg))
results = stub.StreamRemove(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Object.StreamLocation)
self.assertEqual(result.status.code, 0)
| true | true |
f73e65b9e2710c55209a3a72cd65dc8eff53b1c7 | 216 | py | Python | localtileserver/_version.py | RichardScottOZ/localtileserver | a0e63d1853c8d3410ba9d4ca51c993eae15a0fe6 | [
"MIT"
] | 1 | 2021-12-12T05:19:20.000Z | 2021-12-12T05:19:20.000Z | localtileserver/_version.py | RichardScottOZ/localtileserver | a0e63d1853c8d3410ba9d4ca51c993eae15a0fe6 | [
"MIT"
] | null | null | null | localtileserver/_version.py | RichardScottOZ/localtileserver | a0e63d1853c8d3410ba9d4ca51c993eae15a0fe6 | [
"MIT"
] | null | null | null | from pkg_resources import DistributionNotFound, get_distribution
try:
__version__ = get_distribution("localtileserver").version
except DistributionNotFound:
# package is not installed
__version__ = None
| 27 | 64 | 0.800926 | from pkg_resources import DistributionNotFound, get_distribution
try:
__version__ = get_distribution("localtileserver").version
except DistributionNotFound:
__version__ = None
| true | true |
f73e6616f5306b636fabda21070ea9fb0dbf94a4 | 1,841 | py | Python | codedigger/codechef/migrations/0001_initial.py | jyothiprakashpanaik/Backend | 9ab1b57436a0a1a6197777c0b36c842e71121d3a | [
"Apache-2.0"
] | 17 | 2020-10-07T22:40:37.000Z | 2022-01-20T07:19:09.000Z | codedigger/codechef/migrations/0001_initial.py | jyothiprakashpanaik/Backend | 9ab1b57436a0a1a6197777c0b36c842e71121d3a | [
"Apache-2.0"
] | 42 | 2021-06-03T01:58:04.000Z | 2022-01-31T14:49:22.000Z | codedigger/codechef/migrations/0001_initial.py | jyothiprakashpanaik/Backend | 9ab1b57436a0a1a6197777c0b36c842e71121d3a | [
"Apache-2.0"
] | 25 | 2020-10-06T17:55:19.000Z | 2021-12-09T07:56:50.000Z | # Generated by Django 3.1.4 on 2021-12-19 04:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('problem', '0005_auto_20210129_2145'),
]
operations = [
migrations.CreateModel(
name='CodechefContest',
fields=[
('id',
models.AutoField(auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('name', models.CharField(max_length=200)),
('contestId', models.CharField(db_index=True, max_length=10)),
('duration', models.IntegerField(blank=True, null=True)),
('startTime', models.DateTimeField(blank=True, null=True)),
('url', models.CharField(blank=True, max_length=200,
null=True)),
],
),
migrations.CreateModel(
name='CodechefContestProblems',
fields=[
('id',
models.AutoField(auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('contest',
models.ForeignKey(blank=True,
on_delete=django.db.models.deletion.CASCADE,
to='codechef.codechefcontest')),
('problem',
models.ForeignKey(blank=True,
on_delete=django.db.models.deletion.CASCADE,
to='problem.problem')),
],
),
]
| 36.098039 | 79 | 0.456817 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('problem', '0005_auto_20210129_2145'),
]
operations = [
migrations.CreateModel(
name='CodechefContest',
fields=[
('id',
models.AutoField(auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('name', models.CharField(max_length=200)),
('contestId', models.CharField(db_index=True, max_length=10)),
('duration', models.IntegerField(blank=True, null=True)),
('startTime', models.DateTimeField(blank=True, null=True)),
('url', models.CharField(blank=True, max_length=200,
null=True)),
],
),
migrations.CreateModel(
name='CodechefContestProblems',
fields=[
('id',
models.AutoField(auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('contest',
models.ForeignKey(blank=True,
on_delete=django.db.models.deletion.CASCADE,
to='codechef.codechefcontest')),
('problem',
models.ForeignKey(blank=True,
on_delete=django.db.models.deletion.CASCADE,
to='problem.problem')),
],
),
]
| true | true |
f73e671e6aa1ad7ed86ea0482810b031647921c5 | 1,796 | py | Python | source/playbooks/PCI321/ssmdocs/scripts/test/test_pci_get_input_values.py | sybeck2k/aws-security-hub-automated-response-and-remediation | c47870fbda6a41ad000dcf6c40db9033cb3b7abb | [
"Apache-2.0"
] | 129 | 2020-08-11T18:18:50.000Z | 2021-10-04T20:00:35.000Z | source/playbooks/PCI321/ssmdocs/scripts/test/test_pci_get_input_values.py | sybeck2k/aws-security-hub-automated-response-and-remediation | c47870fbda6a41ad000dcf6c40db9033cb3b7abb | [
"Apache-2.0"
] | 39 | 2020-08-11T18:07:58.000Z | 2021-10-15T16:26:24.000Z | source/playbooks/PCI321/ssmdocs/scripts/test/test_pci_get_input_values.py | sybeck2k/aws-security-hub-automated-response-and-remediation | c47870fbda6a41ad000dcf6c40db9033cb3b7abb | [
"Apache-2.0"
] | 35 | 2020-08-15T04:57:27.000Z | 2021-09-21T06:23:17.000Z | #!/usr/bin/python
###############################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License Version 2.0 (the "License"). You may not #
# use this file except in compliance with the License. A copy of the License #
# is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0/ #
# #
# or in the "license" file accompanying this file. This file is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express #
# or implied. See the License for the specific language governing permis- #
# sions and limitations under the License. #
###############################################################################
import pytest
from pci_get_input_values import verify
def expected():
return {
"filter_name": "SHARR_Filter_PCI_321_Finding_CW1_RootAccountUsage",
"filter_pattern": '{$.userIdentity.type="Root" && $.userIdentity.invokedBy NOT EXISTS && $.eventType !="AwsServiceEvent"}',
"metric_name": "SHARR_PCI_321_Finding_CW1_RootAccountUsage",
"metric_value": 1,
"alarm_name": "SHARR_Alarm_PCI_321_Finding_CW1_RootAccountUsage",
"alarm_desc": "Alarm for PCI finding CW.1 RootAccountUsage",
"alarm_threshold": 1
}
def test_verify():
assert verify({'ControlId': 'PCI.CW.1'}, {}) == expected()
| 54.424242 | 131 | 0.492762 | true | true | |
f73e676da3ddcc59d17aa8b507376a695d41a196 | 114 | py | Python | jobs/admin.py | Xubash/Portfolio-using-django | 5eb3d09cbb4d3782fbdc22144cad75c96100b595 | [
"MIT"
] | null | null | null | jobs/admin.py | Xubash/Portfolio-using-django | 5eb3d09cbb4d3782fbdc22144cad75c96100b595 | [
"MIT"
] | 8 | 2021-03-19T01:22:50.000Z | 2022-03-12T00:21:05.000Z | jobs/admin.py | Xubash/Portfolio-using-django | 5eb3d09cbb4d3782fbdc22144cad75c96100b595 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Jobs
# Register your models here.
admin.site.register(Jobs)
| 19 | 32 | 0.798246 | from django.contrib import admin
from .models import Jobs
admin.site.register(Jobs)
| true | true |
f73e678a57d4db67f779882542969daa468f8c46 | 7,770 | py | Python | stage/configuration/test_postgresql_multitable_consumer_origin.py | anubandhan/datacollector-tests | 301c024c66d68353735256b262b681dd05ba16cc | [
"Apache-2.0"
] | null | null | null | stage/configuration/test_postgresql_multitable_consumer_origin.py | anubandhan/datacollector-tests | 301c024c66d68353735256b262b681dd05ba16cc | [
"Apache-2.0"
] | 1 | 2019-04-24T11:06:38.000Z | 2019-04-24T11:06:38.000Z | stage/configuration/test_postgresql_multitable_consumer_origin.py | anubandhan/datacollector-tests | 301c024c66d68353735256b262b681dd05ba16cc | [
"Apache-2.0"
] | 2 | 2019-05-24T06:34:37.000Z | 2020-03-30T11:48:18.000Z | import logging
import string
import pytest
import sqlalchemy
from streamsets.testframework.decorators import stub
from streamsets.testframework.markers import category, sdc_min_version
from streamsets.testframework.utils import get_random_string
logger = logging.getLogger(__name__)
pytestmark = [pytest.mark.sdc_min_version('3.15.0'), pytest.mark.database('postgresql')]
@stub
@category('advanced')
def test_additional_jdbc_configuration_properties(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'auto_commit': False}, {'auto_commit': True}])
def test_auto_commit(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'per_batch_strategy': 'SWITCH_TABLES'}])
def test_batches_from_result_set(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('basic')
@pytest.mark.parametrize('stage_attributes', [{'ssl_mode': 'VERIFY_CA'}, {'ssl_mode': 'VERIFY_FULL'}])
def test_ca_certificate_pem(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
def test_connection_health_test_query(sdc_builder, sdc_executor, database):
pass
@stub
@category('basic')
def test_connection_string(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_connection_timeout_in_seconds(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'convert_timestamp_to_string': False},
{'convert_timestamp_to_string': True}])
def test_convert_timestamp_to_string(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
def test_data_time_zone(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'enforce_read_only_connection': False},
{'enforce_read_only_connection': True}])
def test_enforce_read_only_connection(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
def test_fetch_size(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_idle_timeout_in_seconds(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_init_query(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'initial_table_order_strategy': 'ALPHABETICAL'},
{'initial_table_order_strategy': 'NONE'},
{'initial_table_order_strategy': 'REFERENTIAL_CONSTRAINTS'}])
def test_initial_table_order_strategy(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
def test_jdbc_driver_class_name(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_max_batch_size_in_records(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_max_blob_size_in_bytes(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_max_clob_size_in_characters(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_max_connection_lifetime_in_seconds(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_maximum_pool_size(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_minimum_idle_connections(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_new_table_discovery_interval(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_no_more_data_event_generation_delay_in_seconds(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_number_of_retries_on_sql_error(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_number_of_threads(sdc_builder, sdc_executor, database):
pass
@stub
@category('basic')
@pytest.mark.parametrize('stage_attributes', [{'on_record_error': 'DISCARD'},
{'on_record_error': 'STOP_PIPELINE'},
{'on_record_error': 'TO_ERROR'}])
def test_on_record_error(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'on_unknown_type': 'CONVERT_TO_STRING'},
{'on_unknown_type': 'STOP_PIPELINE'}])
def test_on_unknown_type(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('basic')
@pytest.mark.parametrize('stage_attributes', [{'use_credentials': True}])
def test_password(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'per_batch_strategy': 'PROCESS_ALL_AVAILABLE_ROWS_FROM_TABLE'},
{'per_batch_strategy': 'SWITCH_TABLES'}])
def test_per_batch_strategy(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
def test_queries_per_second(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'quote_character': 'BACKTICK'},
{'quote_character': 'DOUBLE_QUOTES'},
{'quote_character': 'NONE'}])
def test_quote_character(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'per_batch_strategy': 'SWITCH_TABLES'}])
def test_result_set_cache_size(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('basic')
@pytest.mark.parametrize('stage_attributes', [{'ssl_mode': 'VERIFY_CA'}, {'ssl_mode': 'VERIFY_FULL'}])
def test_server_certificate_pem(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('basic')
@pytest.mark.parametrize('stage_attributes', [{'ssl_mode': 'DISABLED'},
{'ssl_mode': 'REQUIRED'},
{'ssl_mode': 'VERIFY_CA'},
{'ssl_mode': 'VERIFY_FULL'}])
def test_ssl_mode(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('basic')
def test_table_configs(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'transaction_isolation': 'DEFAULT'},
{'transaction_isolation': 'TRANSACTION_READ_COMMITTED'},
{'transaction_isolation': 'TRANSACTION_READ_UNCOMMITTED'},
{'transaction_isolation': 'TRANSACTION_REPEATABLE_READ'},
{'transaction_isolation': 'TRANSACTION_SERIALIZABLE'}])
def test_transaction_isolation(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'use_credentials': False}, {'use_credentials': True}])
def test_use_credentials(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('basic')
@pytest.mark.parametrize('stage_attributes', [{'use_credentials': True}])
def test_username(sdc_builder, sdc_executor, database, stage_attributes):
pass
| 27.849462 | 110 | 0.701158 | import logging
import string
import pytest
import sqlalchemy
from streamsets.testframework.decorators import stub
from streamsets.testframework.markers import category, sdc_min_version
from streamsets.testframework.utils import get_random_string
logger = logging.getLogger(__name__)
pytestmark = [pytest.mark.sdc_min_version('3.15.0'), pytest.mark.database('postgresql')]
@stub
@category('advanced')
def test_additional_jdbc_configuration_properties(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'auto_commit': False}, {'auto_commit': True}])
def test_auto_commit(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'per_batch_strategy': 'SWITCH_TABLES'}])
def test_batches_from_result_set(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('basic')
@pytest.mark.parametrize('stage_attributes', [{'ssl_mode': 'VERIFY_CA'}, {'ssl_mode': 'VERIFY_FULL'}])
def test_ca_certificate_pem(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
def test_connection_health_test_query(sdc_builder, sdc_executor, database):
pass
@stub
@category('basic')
def test_connection_string(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_connection_timeout_in_seconds(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'convert_timestamp_to_string': False},
{'convert_timestamp_to_string': True}])
def test_convert_timestamp_to_string(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
def test_data_time_zone(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'enforce_read_only_connection': False},
{'enforce_read_only_connection': True}])
def test_enforce_read_only_connection(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
def test_fetch_size(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_idle_timeout_in_seconds(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_init_query(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'initial_table_order_strategy': 'ALPHABETICAL'},
{'initial_table_order_strategy': 'NONE'},
{'initial_table_order_strategy': 'REFERENTIAL_CONSTRAINTS'}])
def test_initial_table_order_strategy(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
def test_jdbc_driver_class_name(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_max_batch_size_in_records(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_max_blob_size_in_bytes(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_max_clob_size_in_characters(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_max_connection_lifetime_in_seconds(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_maximum_pool_size(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_minimum_idle_connections(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_new_table_discovery_interval(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_no_more_data_event_generation_delay_in_seconds(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_number_of_retries_on_sql_error(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
def test_number_of_threads(sdc_builder, sdc_executor, database):
pass
@stub
@category('basic')
@pytest.mark.parametrize('stage_attributes', [{'on_record_error': 'DISCARD'},
{'on_record_error': 'STOP_PIPELINE'},
{'on_record_error': 'TO_ERROR'}])
def test_on_record_error(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'on_unknown_type': 'CONVERT_TO_STRING'},
{'on_unknown_type': 'STOP_PIPELINE'}])
def test_on_unknown_type(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('basic')
@pytest.mark.parametrize('stage_attributes', [{'use_credentials': True}])
def test_password(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'per_batch_strategy': 'PROCESS_ALL_AVAILABLE_ROWS_FROM_TABLE'},
{'per_batch_strategy': 'SWITCH_TABLES'}])
def test_per_batch_strategy(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
def test_queries_per_second(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'quote_character': 'BACKTICK'},
{'quote_character': 'DOUBLE_QUOTES'},
{'quote_character': 'NONE'}])
def test_quote_character(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'per_batch_strategy': 'SWITCH_TABLES'}])
def test_result_set_cache_size(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('basic')
@pytest.mark.parametrize('stage_attributes', [{'ssl_mode': 'VERIFY_CA'}, {'ssl_mode': 'VERIFY_FULL'}])
def test_server_certificate_pem(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('basic')
@pytest.mark.parametrize('stage_attributes', [{'ssl_mode': 'DISABLED'},
{'ssl_mode': 'REQUIRED'},
{'ssl_mode': 'VERIFY_CA'},
{'ssl_mode': 'VERIFY_FULL'}])
def test_ssl_mode(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('basic')
def test_table_configs(sdc_builder, sdc_executor, database):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'transaction_isolation': 'DEFAULT'},
{'transaction_isolation': 'TRANSACTION_READ_COMMITTED'},
{'transaction_isolation': 'TRANSACTION_READ_UNCOMMITTED'},
{'transaction_isolation': 'TRANSACTION_REPEATABLE_READ'},
{'transaction_isolation': 'TRANSACTION_SERIALIZABLE'}])
def test_transaction_isolation(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('advanced')
@pytest.mark.parametrize('stage_attributes', [{'use_credentials': False}, {'use_credentials': True}])
def test_use_credentials(sdc_builder, sdc_executor, database, stage_attributes):
pass
@stub
@category('basic')
@pytest.mark.parametrize('stage_attributes', [{'use_credentials': True}])
def test_username(sdc_builder, sdc_executor, database, stage_attributes):
pass
| true | true |
f73e68f13e3d392025e0a601a411e2dcd52e6dd2 | 3,072 | py | Python | programmes/analysis.py | louisMAURY/Markdown_to_HTML | 5d0b37d844b241200d5a245ac1ee9b23510e6a2d | [
"BSD-3-Clause"
] | 1 | 2019-01-14T23:12:04.000Z | 2019-01-14T23:12:04.000Z | programmes/analysis.py | louisMAURY/Markdown_to_HTML | 5d0b37d844b241200d5a245ac1ee9b23510e6a2d | [
"BSD-3-Clause"
] | null | null | null | programmes/analysis.py | louisMAURY/Markdown_to_HTML | 5d0b37d844b241200d5a245ac1ee9b23510e6a2d | [
"BSD-3-Clause"
] | null | null | null | from conding_htm import *
fileuh = "markdown.md"
fichier = "markdown.md"
# Read the markdown file and return its lines.
def readeuh(fileuh):
    """Read *fileuh* line by line into the module-level ``contents`` list.

    The lines (with trailing newlines) are also returned for convenience.
    """
    global contents
    with open(fileuh, "r") as source:
        contents = source.readlines()
    return contents
# Detect '#' headings and emit the matching HTML heading level.
def a_title():
    """Scan every line of the source file and convert markdown headings
    (one to six '#' characters on a line) into the matching HTML heading.
    """
    readeuh(fichier)
    for line in contents:
        depth = line.count("#")
        # Map heading depth to its HTML emitter and trace label.
        dispatch = {
            1: (h1, "TITRE DE NIV 1"),
            2: (h2, "TITRE DE NIV 2"),
            3: (h3, "TITRE DE NIV 3"),
            4: (h4, "TITRE DE NIV 4"),
            5: (h5, "TITRE DE NIV 5"),
            6: (h6, "TITRE DE NIV 6"),
        }
        if depth in dispatch:
            emit, label = dispatch[depth]
            emit(line)
            print(label)
# Detect emphasis markers (*/_) and emit italic or bold HTML.
def a_italic_bold():
    """Convert emphasis markers on each line: two stars/underscores mean
    italic, four mean bold.
    """
    readeuh(fichier)
    for line in contents:
        stars = line.count("*")
        underscores = line.count("_")
        # *word* -> italic, **word** -> bold
        if stars == 2:
            italic(line, "*")
            print("C'est un mot italique")
        elif stars == 4:
            bold(line, "**")
            print("C'est un mot gras")
        # _word_ -> italic, __word__ -> bold
        if underscores == 2:
            italic(line, "_")
            print("Italique aussi")
        elif underscores == 4:
            bold(line, "__")
            print("Gras aussi")
# Detect markdown links ([text](url)) and emit HTML anchors.
def a_link():
    """Treat a line holding exactly one of each bracket character
    ('[', ']', '(', ')') as a markdown link and emit an HTML anchor.
    """
    readeuh(fichier)
    for line in contents:
        markers = sum(line.count(bracket) for bracket in "[]()")
        if markers == 4:
            link(line)
            print("C'EST UN LIEN")
# Detect list markers ('-') and emit an HTML list.
def a_list():
    """Emit an HTML list once for any line containing at least two '-'
    characters (the original fires on the second dash seen)."""
    readeuh(fichier)
    for line in contents:
        if line.count("-") >= 2:
            ul_li(contents)
            print("Y'A UNE LISTE ICI")
# Run every analysis pass over the markdown source, in order.
a_title()
a_italic_bold()
a_link()
a_list()
| 23.630769 | 81 | 0.470052 | from conding_htm import *
fileuh = "markdown.md"
fichier = "markdown.md"
def readeuh(fileuh):
with open(fileuh , "r") as markdown:
global contents
contents = markdown.readlines()
return contents
def a_title():
readeuh(fichier)
for element in contents:
counter = 0
for cara in element:
if cara == "#":
counter += 1
if counter == 1:
h1(element)
print("TITRE DE NIV 1")
elif counter == 2:
h2(element)
print("TITRE DE NIV 2")
elif counter == 3:
h3(element)
print("TITRE DE NIV 3")
elif counter == 4:
h4(element)
print("TITRE DE NIV 4")
elif counter == 5:
h5(element)
print("TITRE DE NIV 5")
elif counter == 6:
h6(element)
print("TITRE DE NIV 6")
def a_italic_bold():
readeuh(fichier)
for element in contents:
counter_star = 0
counter_under = 0
for cara in element:
if cara == "*":
counter_star += 1
elif cara == "_":
counter_under += 1
if counter_star == 2:
italic(element , "*")
print("C'est un mot italique")
# **Bold** word
elif counter_star == 4:
bold(element , "**")
print("C'est un mot gras")
if counter_under == 2:
italic(element , "_")
print("Italique aussi")
elif counter_under == 4:
bold(element , "__")
print("Gras aussi")
def a_link():
readeuh(fichier)
for element in contents:
counter = 0
for cara in element:
if cara =="[":
counter += 1
if cara == "]":
counter += 1
if cara == "(":
counter += 1
if cara == ")":
counter += 1
if counter == 4:
link(element)
print("C'EST UN LIEN")
# The function say if their is a list
def a_list():
readeuh(fichier)
for element in contents:
counter = 0
# print(element)
# print(contents)
for cara in element:
if cara == "-":
counter += 1
if counter == 2:
ul_li(contents)
print("Y'A UNE LISTE ICI")
a_title()
a_italic_bold()
a_link()
a_list()
| true | true |
f73e69011f93c5f86577ad4c26e5ff9dc8d1e914 | 1,817 | py | Python | boofuzz/boofuzz/connections/base_socket_connection.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | 2 | 2021-05-05T12:03:01.000Z | 2021-06-04T14:27:15.000Z | boofuzz/boofuzz/connections/base_socket_connection.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | null | null | null | boofuzz/boofuzz/connections/base_socket_connection.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | 2 | 2021-05-05T12:03:09.000Z | 2021-06-04T14:27:21.000Z | from __future__ import absolute_import
import abc
import math
import os
import socket
import struct
from future.utils import with_metaclass
from boofuzz.connections import itarget_connection
def _seconds_to_sockopt_format(seconds):
"""Convert floating point seconds value to second/useconds struct used by UNIX socket library.
For Windows, convert to whole milliseconds.
"""
if os.name == "nt":
return int(seconds * 1000)
else:
microseconds_per_second = 1000000
whole_seconds = int(math.floor(seconds))
whole_microseconds = int(math.floor((seconds % 1) * microseconds_per_second))
return struct.pack("ll", whole_seconds, whole_microseconds)
class BaseSocketConnection(with_metaclass(abc.ABCMeta, itarget_connection.ITargetConnection)):
    """Common base for connections implemented over sockets.

    .. versionadded:: 0.2.0

    Args:
        send_timeout (float): Seconds to wait for send before timing out. Default 5.0.
        recv_timeout (float): Seconds to wait for recv before timing out. Default 5.0.
    """

    def __init__(self, send_timeout, recv_timeout):
        self._send_timeout = send_timeout
        self._recv_timeout = recv_timeout
        self._sock = None

    def close(self):
        """Close the connection to the target.

        Returns:
            None
        """
        self._sock.close()

    @abc.abstractmethod
    def open(self):
        """Open the connection to the target. Make sure to call close!

        Applies the configured send/recv timeouts to the socket created by
        the subclass.

        Returns:
            None
        """
        timeouts = (
            (socket.SO_SNDTIMEO, self._send_timeout),
            (socket.SO_RCVTIMEO, self._recv_timeout),
        )
        for option, seconds in timeouts:
            self._sock.setsockopt(socket.SOL_SOCKET, option, _seconds_to_sockopt_format(seconds))
| 29.306452 | 116 | 0.687947 | from __future__ import absolute_import
import abc
import math
import os
import socket
import struct
from future.utils import with_metaclass
from boofuzz.connections import itarget_connection
def _seconds_to_sockopt_format(seconds):
if os.name == "nt":
return int(seconds * 1000)
else:
microseconds_per_second = 1000000
whole_seconds = int(math.floor(seconds))
whole_microseconds = int(math.floor((seconds % 1) * microseconds_per_second))
return struct.pack("ll", whole_seconds, whole_microseconds)
class BaseSocketConnection(with_metaclass(abc.ABCMeta, itarget_connection.ITargetConnection)):
def __init__(self, send_timeout, recv_timeout):
self._send_timeout = send_timeout
self._recv_timeout = recv_timeout
self._sock = None
def close(self):
self._sock.close()
@abc.abstractmethod
def open(self):
self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDTIMEO, _seconds_to_sockopt_format(self._send_timeout))
self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, _seconds_to_sockopt_format(self._recv_timeout))
| true | true |
f73e69d2ce2e869cde4a34aede06b0ef96caaedb | 552 | py | Python | spoklient/onedrive/drive_item.py | franciscomcdias/SpoKlient | b2f6954cc3e35c84b8686588d4fd4636f13153d4 | [
"MIT"
] | null | null | null | spoklient/onedrive/drive_item.py | franciscomcdias/SpoKlient | b2f6954cc3e35c84b8686588d4fd4636f13153d4 | [
"MIT"
] | null | null | null | spoklient/onedrive/drive_item.py | franciscomcdias/SpoKlient | b2f6954cc3e35c84b8686588d4fd4636f13153d4 | [
"MIT"
] | null | null | null | from spoklient.onedrive.base_item import BaseItem
from spoklient.runtime.resource_path_entity import ResourcePathEntity
class DriveItem(BaseItem):
"""The driveItem resource represents a file, folder, or other item stored in a drive. All file system objects in
OneDrive and SharePoint are returned as driveItem resources """
def item_with_path(self, path):
"""Retrieve DriveItem by path"""
return DriveItem(self.context,
ResourcePathEntity(self.context, self.resource_path, ':/{0}'.format(path)))
| 42.461538 | 116 | 0.726449 | from spoklient.onedrive.base_item import BaseItem
from spoklient.runtime.resource_path_entity import ResourcePathEntity
class DriveItem(BaseItem):
def item_with_path(self, path):
return DriveItem(self.context,
ResourcePathEntity(self.context, self.resource_path, ':/{0}'.format(path)))
| true | true |
f73e6ae940e9f17aa57ffd9e88ca884f932a18c9 | 4,638 | py | Python | ics/structures/srad_gigalog_settings.py | hollinsky-intrepid/python_ics | b6ec5486ec3cc2548e33845c265faccf293b88f5 | [
"Unlicense"
] | null | null | null | ics/structures/srad_gigalog_settings.py | hollinsky-intrepid/python_ics | b6ec5486ec3cc2548e33845c265faccf293b88f5 | [
"Unlicense"
] | null | null | null | ics/structures/srad_gigalog_settings.py | hollinsky-intrepid/python_ics | b6ec5486ec3cc2548e33845c265faccf293b88f5 | [
"Unlicense"
] | null | null | null | # This file was auto generated; Do not modify, if you value your sanity!
import ctypes
try: # 14
from can_settings import can_settings
from canfd_settings import canfd_settings
from iso9141_keyword2000_settings import iso9141_keyword2000_settings
from s_text_api_settings import s_text_api_settings
from disk_settings import disk_settings
from timesync_icshardware_settings import timesync_icshardware_settings
from ethernet_settings2 import ethernet_settings2
from serdescam_settings import serdescam_settings
from ethernet10_g_settings import ethernet10_g_settings
from lin_settings import lin_settings
from serdespoc_settings import serdespoc_settings
from logger_settings import logger_settings
from rad_reporting_settings import rad_reporting_settings
from serdesgen_settings import serdesgen_settings
except:
from ics.structures.can_settings import can_settings
from ics.structures.canfd_settings import canfd_settings
from ics.structures.iso9141_keyword2000_settings import iso9141_keyword2000_settings
from ics.structures.s_text_api_settings import s_text_api_settings
from ics.structures.disk_settings import disk_settings
from ics.structures.timesync_icshardware_settings import timesync_icshardware_settings
from ics.structures.ethernet_settings2 import ethernet_settings2
from ics.structures.serdescam_settings import serdescam_settings
from ics.structures.ethernet10_g_settings import ethernet10_g_settings
from ics.structures.lin_settings import lin_settings
from ics.structures.serdespoc_settings import serdespoc_settings
from ics.structures.logger_settings import logger_settings
from ics.structures.rad_reporting_settings import rad_reporting_settings
from ics.structures.serdesgen_settings import serdesgen_settings
# flags
class flags(ctypes.Structure):
    """Bit-packed option flags (16 bits total, 2-byte packing).

    Auto-generated ctypes layout; field order and bit widths define the
    binary format and must not be altered by hand.
    """
    _pack_ = 2
    _fields_ = [
        ('hwComLatencyTestEn', ctypes.c_uint16, 1), # [Bitfield] 1 bit
        ('disableUsbCheckOnBoot', ctypes.c_uint16, 1), # [Bitfield] 1 bit
        ('reserved', ctypes.c_uint16, 14), # [Bitfield] pads out to 16 bits
    ]
    # Extra names go here:
    # End of extra names
class srad_gigalog_settings(ctypes.Structure):
    """Auto-generated ctypes layout of the SRAD Gigalog device settings block.

    Field order, types, and the 2-byte packing define the on-device binary
    format — do not edit by hand (the file header marks it auto-generated).
    ``_anonymous_`` flattens the embedded ``flags`` bitfield so its members
    are reachable directly on this structure.
    """
    _pack_ = 2
    _anonymous_ = ("flags",)
    _fields_ = [
        ('ecu_id', ctypes.c_uint32),
        ('perf_en', ctypes.c_uint16),
        ('can1', can_settings),
        ('canfd1', canfd_settings),
        ('can2', can_settings),
        ('canfd2', canfd_settings),
        ('can3', can_settings),
        ('canfd3', canfd_settings),
        ('can4', can_settings),
        ('canfd4', canfd_settings),
        ('can5', can_settings),
        ('canfd5', canfd_settings),
        ('can6', can_settings),
        ('canfd6', canfd_settings),
        ('can7', can_settings),
        ('canfd7', canfd_settings),
        ('can8', can_settings),
        ('canfd8', canfd_settings),
        ('network_enables', ctypes.c_uint16),
        ('network_enables_2', ctypes.c_uint16),
        ('pwr_man_timeout', ctypes.c_uint32),
        ('pwr_man_enable', ctypes.c_uint16),
        ('network_enabled_on_boot', ctypes.c_uint16),
        ('iso15765_separation_time_offset', ctypes.c_uint16),
        ('iso_9141_kwp_enable_reserved', ctypes.c_uint16),
        ('iso9141_kwp_settings_1', iso9141_keyword2000_settings),
        ('iso_parity_1', ctypes.c_uint16),
        ('iso_msg_termination_1', ctypes.c_uint16),
        ('idle_wakeup_network_enables_1', ctypes.c_uint16),
        ('idle_wakeup_network_enables_2', ctypes.c_uint16),
        ('network_enables_3', ctypes.c_uint16),
        ('idle_wakeup_network_enables_3', ctypes.c_uint16),
        ('text_api', s_text_api_settings),
        ('termination_enables', ctypes.c_uint64),
        ('rsvd1', ctypes.c_uint8 * 8),
        ('rsvd2', ctypes.c_uint8 * 8),
        ('disk', disk_settings),
        ('timeSyncSettings', timesync_icshardware_settings),
        ('flags', flags),
        ('ethernet', ethernet_settings2),
        ('serdescam1', serdescam_settings),
        ('ethernet10g', ethernet10_g_settings),
        ('lin1', lin_settings),
        ('serdespoc', serdespoc_settings),
        ('logger', logger_settings),
        ('serdescam2', serdescam_settings),
        ('serdescam3', serdescam_settings),
        ('serdescam4', serdescam_settings),
        ('ethernet2', ethernet_settings2),
        ('network_enables_4', ctypes.c_uint16),
        ('reporting', rad_reporting_settings),
        ('serdesgen', serdesgen_settings),
    ]
# Extra names go here:
SRADGigalogSettings = srad_gigalog_settings
# End of extra names
| 42.550459 | 90 | 0.703967 |
import ctypes
try:
from can_settings import can_settings
from canfd_settings import canfd_settings
from iso9141_keyword2000_settings import iso9141_keyword2000_settings
from s_text_api_settings import s_text_api_settings
from disk_settings import disk_settings
from timesync_icshardware_settings import timesync_icshardware_settings
from ethernet_settings2 import ethernet_settings2
from serdescam_settings import serdescam_settings
from ethernet10_g_settings import ethernet10_g_settings
from lin_settings import lin_settings
from serdespoc_settings import serdespoc_settings
from logger_settings import logger_settings
from rad_reporting_settings import rad_reporting_settings
from serdesgen_settings import serdesgen_settings
except:
from ics.structures.can_settings import can_settings
from ics.structures.canfd_settings import canfd_settings
from ics.structures.iso9141_keyword2000_settings import iso9141_keyword2000_settings
from ics.structures.s_text_api_settings import s_text_api_settings
from ics.structures.disk_settings import disk_settings
from ics.structures.timesync_icshardware_settings import timesync_icshardware_settings
from ics.structures.ethernet_settings2 import ethernet_settings2
from ics.structures.serdescam_settings import serdescam_settings
from ics.structures.ethernet10_g_settings import ethernet10_g_settings
from ics.structures.lin_settings import lin_settings
from ics.structures.serdespoc_settings import serdespoc_settings
from ics.structures.logger_settings import logger_settings
from ics.structures.rad_reporting_settings import rad_reporting_settings
from ics.structures.serdesgen_settings import serdesgen_settings
class flags(ctypes.Structure):
_pack_ = 2
_fields_ = [
('hwComLatencyTestEn', ctypes.c_uint16, 1),
('disableUsbCheckOnBoot', ctypes.c_uint16, 1),
('reserved', ctypes.c_uint16, 14),
]
class srad_gigalog_settings(ctypes.Structure):
_pack_ = 2
_anonymous_ = ("flags",)
_fields_ = [
('ecu_id', ctypes.c_uint32),
('perf_en', ctypes.c_uint16),
('can1', can_settings),
('canfd1', canfd_settings),
('can2', can_settings),
('canfd2', canfd_settings),
('can3', can_settings),
('canfd3', canfd_settings),
('can4', can_settings),
('canfd4', canfd_settings),
('can5', can_settings),
('canfd5', canfd_settings),
('can6', can_settings),
('canfd6', canfd_settings),
('can7', can_settings),
('canfd7', canfd_settings),
('can8', can_settings),
('canfd8', canfd_settings),
('network_enables', ctypes.c_uint16),
('network_enables_2', ctypes.c_uint16),
('pwr_man_timeout', ctypes.c_uint32),
('pwr_man_enable', ctypes.c_uint16),
('network_enabled_on_boot', ctypes.c_uint16),
('iso15765_separation_time_offset', ctypes.c_uint16),
('iso_9141_kwp_enable_reserved', ctypes.c_uint16),
('iso9141_kwp_settings_1', iso9141_keyword2000_settings),
('iso_parity_1', ctypes.c_uint16),
('iso_msg_termination_1', ctypes.c_uint16),
('idle_wakeup_network_enables_1', ctypes.c_uint16),
('idle_wakeup_network_enables_2', ctypes.c_uint16),
('network_enables_3', ctypes.c_uint16),
('idle_wakeup_network_enables_3', ctypes.c_uint16),
('text_api', s_text_api_settings),
('termination_enables', ctypes.c_uint64),
('rsvd1', ctypes.c_uint8 * 8),
('rsvd2', ctypes.c_uint8 * 8),
('disk', disk_settings),
('timeSyncSettings', timesync_icshardware_settings),
('flags', flags),
('ethernet', ethernet_settings2),
('serdescam1', serdescam_settings),
('ethernet10g', ethernet10_g_settings),
('lin1', lin_settings),
('serdespoc', serdespoc_settings),
('logger', logger_settings),
('serdescam2', serdescam_settings),
('serdescam3', serdescam_settings),
('serdescam4', serdescam_settings),
('ethernet2', ethernet_settings2),
('network_enables_4', ctypes.c_uint16),
('reporting', rad_reporting_settings),
('serdesgen', serdesgen_settings),
]
SRADGigalogSettings = srad_gigalog_settings
| true | true |
f73e6b15818eb58995d4a5233f635921cdf3f490 | 1,067 | py | Python | logdto/create.py | hangilc/myclinic-spring | 5d2befd7901439d8e8c0102e0c81cf356b5b38ba | [
"MIT"
] | null | null | null | logdto/create.py | hangilc/myclinic-spring | 5d2befd7901439d8e8c0102e0c81cf356b5b38ba | [
"MIT"
] | 2 | 2020-03-04T22:17:34.000Z | 2020-03-21T05:58:49.000Z | logdto/create.py | hangilc/myclinic-spring | 5d2befd7901439d8e8c0102e0c81cf356b5b38ba | [
"MIT"
] | null | null | null | import io
import os
from jinja2 import Environment, FileSystemLoader
import sys
name = sys.argv[1]
prog_dir = os.path.abspath(os.path.dirname(__file__))
env = Environment(loader=FileSystemLoader(prog_dir), trim_blocks=True, lstrip_blocks=True)
template = env.get_template('TemplateCreated.java.txt')
output = template.render(name = name)
output_file = prog_dir + "/src/main/java/jp/chang/myclinic/logdto/practicelog/" + name + "Created.java"
with io.open(output_file, "w", encoding='utf8') as f:
f.write(output)
template = env.get_template('TemplateUpdated.java.txt')
output = template.render(name = name)
output_file = prog_dir + "/src/main/java/jp/chang/myclinic/logdto/practicelog/" + name + "Updated.java"
with io.open(output_file, "w", encoding='utf8') as f:
f.write(output)
template = env.get_template('TemplateDeleted.java.txt')
output = template.render(name = name)
output_file = prog_dir + "/src/main/java/jp/chang/myclinic/logdto/practicelog/" + name + "Deleted.java"
with io.open(output_file, "w", encoding='utf8') as f:
f.write(output)
| 35.566667 | 103 | 0.74508 | import io
import os
from jinja2 import Environment, FileSystemLoader
import sys
name = sys.argv[1]
prog_dir = os.path.abspath(os.path.dirname(__file__))
env = Environment(loader=FileSystemLoader(prog_dir), trim_blocks=True, lstrip_blocks=True)
template = env.get_template('TemplateCreated.java.txt')
output = template.render(name = name)
output_file = prog_dir + "/src/main/java/jp/chang/myclinic/logdto/practicelog/" + name + "Created.java"
with io.open(output_file, "w", encoding='utf8') as f:
f.write(output)
template = env.get_template('TemplateUpdated.java.txt')
output = template.render(name = name)
output_file = prog_dir + "/src/main/java/jp/chang/myclinic/logdto/practicelog/" + name + "Updated.java"
with io.open(output_file, "w", encoding='utf8') as f:
f.write(output)
template = env.get_template('TemplateDeleted.java.txt')
output = template.render(name = name)
output_file = prog_dir + "/src/main/java/jp/chang/myclinic/logdto/practicelog/" + name + "Deleted.java"
with io.open(output_file, "w", encoding='utf8') as f:
f.write(output)
| true | true |
f73e6c912a65ce8c693c6f49b8b967b066c3a07f | 5,024 | py | Python | corehq/apps/hqmedia/cache.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/apps/hqmedia/cache.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/apps/hqmedia/cache.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | import io
import zipfile
from django.core.cache import cache
from django.utils.translation import ugettext as _
from soil import DownloadBase
from corehq.apps.hqmedia.models import (
CommCareAudio,
CommCareImage,
CommCareVideo,
)
class BaseMultimediaStatusCache(object):
    """Cache-backed progress record for a multimedia upload.

    Subclasses must set ``upload_type`` and implement ``get_cache_key``.
    """
    upload_type = None
    cache_expiry = 60 * 60  # seconds; entries live for one hour

    def __init__(self, processing_id):
        self.processing_id = processing_id
        self.in_celery = False
        self.complete = False
        self.progress = 0
        self.errors = []
        # Fail loudly if a subclass forgot to declare its upload type.
        if self.upload_type is None:
            raise NotImplementedError("You need to specify an upload type.")

    def __str__(self):
        return "Status of process id %s: %d%%" % (self.processing_id, self.progress)

    def save(self):
        """Persist this status object in the cache under its key."""
        cache.set(self.get_cache_key(self.processing_id), self, self.cache_expiry)

    def mark_with_error(self, error_str):
        """Record *error_str*, mark processing finished, and persist."""
        self.errors.append(error_str)
        self.complete = True
        self.save()

    def get_response(self):
        """Build the dict sent back to the upload controller."""
        return dict(
            type=self.upload_type,
            in_celery=self.in_celery,
            complete=self.complete,
            progress=self.progress,
            errors=self.errors,
            processing_id=self.processing_id,
        )

    @classmethod
    def get_cache_key(cls, processing_id):
        raise NotImplementedError("You need to specify a cache_key format for the status.")

    @classmethod
    def get(cls, processing_id):
        """Fetch the cached status object for *processing_id* (or None)."""
        return cache.get(cls.get_cache_key(processing_id))
class BulkMultimediaStatusCache(BaseMultimediaStatusCache):
    """Progress record for a bulk (zip archive) multimedia upload."""
    upload_type = "zip"

    def __init__(self, processing_id):
        super(BulkMultimediaStatusCache, self).__init__(processing_id)
        self.skipped_files = []
        self.unmatched_files = []
        # One bucket of matched files per supported media class name.
        self.matched_files = {media.__name__: [] for media in self.allowed_media}
        self.total_files = None
        self.processed_files = None

    @property
    def allowed_media(self):
        """Media document classes accepted by the bulk uploader."""
        return [CommCareAudio, CommCareImage, CommCareVideo]

    def get_response(self):
        response = super(BulkMultimediaStatusCache, self).get_response()
        response.update(
            unmatched_files=self.unmatched_files,
            matched_files=self.matched_files,
            total_files=self.total_files,
            processed_files=self.processed_files,
            skipped_files=self.skipped_files,
        )
        return response

    def update_progress(self, num_files_processed):
        """Recompute percent progress; marks complete at 100%, then saves."""
        if self.total_files is None:
            raise ValueError("You need to set total_files before you can update progress.")
        self.processed_files = num_files_processed
        self.progress = int(100 * (self.processed_files / self.total_files))
        if self.progress >= 100:
            self.complete = True
        self.save()

    def add_skipped_path(self, path, mimetype):
        """Remember a file skipped because of its *mimetype*."""
        self.skipped_files.append(dict(path=path, mimetype=mimetype))

    def add_unmatched_path(self, path, reason):
        """Remember a file that could not be matched, with the *reason*."""
        self.unmatched_files.append(dict(path=path, reason=reason))

    def add_matched_path(self, media_class, media_info):
        """File a matched upload under its media class, or record it as
        unmatched when the class is not bulk-upload supported."""
        bucket = media_class.__name__
        if bucket not in self.matched_files:
            self.add_unmatched_path(media_info['path'],
                                    _("Not a bulk-upload supported CommCareMedia type: %s" % media_class.__name__))
            return
        self.matched_files[bucket].append(media_info)

    def _get_upload_file(self):
        """Fetch the cached upload payload as a seekable BytesIO buffer,
        or None (after recording the error) if it cannot be fetched."""
        buffered = io.BytesIO()
        try:
            saved_ref = DownloadBase.get(self.processing_id)
            payload = saved_ref.get_content()
        except Exception as e:
            self.mark_with_error(_("Could not fetch cached bulk upload file. Error: %s." % e))
            return
        buffered.write(payload)
        buffered.seek(0)
        return buffered

    def get_upload_zip(self):
        """Open the uploaded payload as a validated ZipFile, or None
        (after recording the error) if it is missing or corrupt."""
        upload_file = self._get_upload_file()
        try:
            uploaded_zip = zipfile.ZipFile(upload_file)
        except Exception as e:
            self.mark_with_error(_("Error opening file as zip file: %s" % e))
            return
        # testzip() returns the name of the first bad member, if any.
        if uploaded_zip.testzip():
            self.mark_with_error(_("Error encountered processing Zip File. File doesn't look valid."))
            return
        return uploaded_zip

    @classmethod
    def get_cache_key(cls, processing_id):
        return "MMBULK_{}".format(processing_id)
class BulkMultimediaStatusCacheNfs(BulkMultimediaStatusCache):
    """Bulk-upload status cache whose upload already lives on disk:
    the "upload file" handed to get_upload_zip is a filesystem path
    rather than cached bytes."""

    def __init__(self, processing_id, file_path):
        super(BulkMultimediaStatusCacheNfs, self).__init__(processing_id)
        # Path to the uploaded zip on the shared drive.
        self.file_path = file_path

    def _get_upload_file(self):
        # zipfile.ZipFile accepts a path string directly, so the path itself
        # serves as the "file" for get_upload_zip.
        return self.file_path
| 31.4 | 115 | 0.63535 | import io
import zipfile
from django.core.cache import cache
from django.utils.translation import ugettext as _
from soil import DownloadBase
from corehq.apps.hqmedia.models import (
CommCareAudio,
CommCareImage,
CommCareVideo,
)
class BaseMultimediaStatusCache(object):
    """Track the status of a multimedia upload job in the Django cache.

    Subclasses must set ``upload_type`` and implement ``get_cache_key``;
    progress is persisted with :meth:`save` and read back with :meth:`get`.
    """
    # Identifier for the kind of upload; must be overridden by subclasses.
    upload_type = None
    # Cache entries expire after one hour.
    cache_expiry = 60 * 60
    def __init__(self, processing_id):
        # processing_id ties this status object to one upload job.
        self.processing_id = processing_id
        self.in_celery = False
        self.complete = False
        self.progress = 0
        self.errors = []
        if self.upload_type is None:
            raise NotImplementedError("You need to specify an upload type.")
    def __str__(self):
        """Human-readable one-line progress summary."""
        return "Status of process id %(processing_id)s: %(progress)d%%" % {
            'processing_id': self.processing_id,
            'progress': self.progress,
        }
    def save(self):
        """Persist this status object to the Django cache."""
        cache.set(self.get_cache_key(self.processing_id), self, self.cache_expiry)
    def mark_with_error(self, error_str):
        """Record *error_str*, mark the job complete, and persist."""
        self.complete = True
        self.errors.append(error_str)
        self.save()
    def get_response(self):
        """Return a JSON-serializable snapshot of the current status."""
        return {
            'type': self.upload_type,
            'in_celery': self.in_celery,
            'complete': self.complete,
            'progress': self.progress,
            'errors': self.errors,
            'processing_id': self.processing_id,
        }
    @classmethod
    def get_cache_key(cls, processing_id):
        """Return the cache key for *processing_id*; subclass responsibility."""
        raise NotImplementedError("You need to specify a cache_key format for the status.")
    @classmethod
    def get(cls, processing_id):
        """Fetch the cached status for *processing_id* (None if absent/expired)."""
        return cache.get(cls.get_cache_key(processing_id))
class BulkMultimediaStatusCache(BaseMultimediaStatusCache):
    """Status tracker for a bulk (zip) multimedia upload.

    Tracks which archive paths matched a supported CommCare media class,
    which were skipped or unmatched, and overall percentage progress.
    """
    upload_type = "zip"
    def __init__(self, processing_id):
        super(BulkMultimediaStatusCache, self).__init__(processing_id)
        self.skipped_files = []
        self.unmatched_files = []
        # One bucket of matches per supported media class, keyed by class name.
        self.matched_files = dict((m.__name__, []) for m in self.allowed_media)
        self.total_files = None
        self.processed_files = None
    @property
    def allowed_media(self):
        """Media document classes a bulk upload may contain."""
        return [CommCareAudio, CommCareImage, CommCareVideo]
    def get_response(self):
        """Extend the base status payload with per-file bookkeeping."""
        response = super(BulkMultimediaStatusCache, self).get_response()
        response.update({
            'unmatched_files': self.unmatched_files,
            'matched_files': self.matched_files,
            'total_files': self.total_files,
            'processed_files': self.processed_files,
            'skipped_files': self.skipped_files,
        })
        return response
    def update_progress(self, num_files_processed):
        """Record progress as a 0-100 integer percentage and persist it.

        Marks the job complete once progress reaches 100%. ``total_files``
        must be set before calling.
        """
        if self.total_files is None:
            raise ValueError("You need to set total_files before you can update progress.")
        self.processed_files = num_files_processed
        self.progress = int(100 * (self.processed_files / self.total_files))
        if self.progress >= 100:
            self.complete = True
        self.save()
    def add_skipped_path(self, path, mimetype):
        """Record an archive entry that was deliberately skipped."""
        self.skipped_files.append({
            'path': path,
            'mimetype': mimetype,
        })
    def add_unmatched_path(self, path, reason):
        """Record an archive entry that matched no supported media type."""
        self.unmatched_files.append({
            'path': path,
            'reason': reason,
        })
    def add_matched_path(self, media_class, media_info):
        """File a matched entry under *media_class*, or mark it unmatched
        when the class is not one of ``allowed_media``."""
        if media_class.__name__ in self.matched_files:
            self.matched_files[media_class.__name__].append(media_info)
        else:
            self.add_unmatched_path(media_info['path'],
                                    _("Not a bulk-upload supported CommCareMedia type: %s" % media_class.__name__))
    def _get_upload_file(self):
        """Fetch the cached upload payload from soil as a seekable BytesIO.

        On any failure the status is marked with an error and None is
        returned implicitly.
        """
        saved_file = io.BytesIO()
        try:
            saved_ref = DownloadBase.get(self.processing_id)
            data = saved_ref.get_content()
        except Exception as e:
            self.mark_with_error(_("Could not fetch cached bulk upload file. Error: %s." % e))
            return
        saved_file.write(data)
        saved_file.seek(0)
        return saved_file
    def get_upload_zip(self):
        """Open the upload as a ZipFile, or mark an error and return None
        when it cannot be opened or fails its CRC check."""
        saved_file = self._get_upload_file()
        try:
            uploaded_zip = zipfile.ZipFile(saved_file)
        except Exception as e:
            self.mark_with_error(_("Error opening file as zip file: %s" % e))
            return
        # testzip() returns the first bad member's name, or None if all OK.
        if uploaded_zip.testzip():
            self.mark_with_error(_("Error encountered processing Zip File. File doesn't look valid."))
            return
        return uploaded_zip
    @classmethod
    def get_cache_key(cls, processing_id):
        """Cache key of the form ``MMBULK_<processing_id>``."""
        return "MMBULK_%s" % processing_id
class BulkMultimediaStatusCacheNfs(BulkMultimediaStatusCache):
    """Bulk-upload status whose payload lives at a shared filesystem path
    instead of the soil download cache."""
    def __init__(self, processing_id, file_path):
        super(BulkMultimediaStatusCacheNfs, self).__init__(processing_id)
        # Path to the already-uploaded archive on shared storage.
        self.file_path = file_path
    def _get_upload_file(self):
        # Return the path itself; zipfile.ZipFile accepts a filename
        # as well as a file object.
        return self.file_path
| true | true |
f73e6e2aebc2034559453abb61c22121cf21b838 | 249 | py | Python | output/models/nist_data/atomic/decimal/schema_instance/nistschema_sv_iv_atomic_decimal_pattern_2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/atomic/decimal/schema_instance/nistschema_sv_iv_atomic_decimal_pattern_2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/atomic/decimal/schema_instance/nistschema_sv_iv_atomic_decimal_pattern_2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.nist_data.atomic.decimal.schema_instance.nistschema_sv_iv_atomic_decimal_pattern_2_xsd.nistschema_sv_iv_atomic_decimal_pattern_2 import NistschemaSvIvAtomicDecimalPattern2
# Public API of this generated package: a single re-exported model class.
__all__ = [
    "NistschemaSvIvAtomicDecimalPattern2",
]
| 41.5 | 190 | 0.891566 | from output.models.nist_data.atomic.decimal.schema_instance.nistschema_sv_iv_atomic_decimal_pattern_2_xsd.nistschema_sv_iv_atomic_decimal_pattern_2 import NistschemaSvIvAtomicDecimalPattern2
# Public API of this generated package: a single re-exported model class.
__all__ = [
    "NistschemaSvIvAtomicDecimalPattern2",
]
| true | true |
f73e6e794df595aa47697d58697ea6cd6e763330 | 2,172 | py | Python | db/transaction.py | eddiepbc/where_to_go | 4bfbe4cd4dbddadabf7a7f959ed7643dbd70f63b | [
"Apache-2.0"
] | 2 | 2019-11-06T03:20:09.000Z | 2020-04-15T17:40:02.000Z | db/transaction.py | eddiepbc/where_to_go | 4bfbe4cd4dbddadabf7a7f959ed7643dbd70f63b | [
"Apache-2.0"
] | 3 | 2021-03-19T07:55:41.000Z | 2022-01-13T01:48:31.000Z | db/transaction.py | eddiepbc/where_to_go | 4bfbe4cd4dbddadabf7a7f959ed7643dbd70f63b | [
"Apache-2.0"
] | 2 | 2019-11-07T03:15:36.000Z | 2019-11-07T03:20:06.000Z | # coding:utf-8
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Integer, Float, DateTime
# 创建对象的基类:
Base = declarative_base()
class lianjia_transaction(Base):
    """ORM model for one Lianjia housing-transaction record.

    ``__init__`` accepts a dict scraped from the site and copies only the
    keys that correspond to mapped columns, silently ignoring anything else
    (same behavior as the original chain of per-key ``if`` tests, without
    the 17-branch repetition).
    """
    # Table name.
    __tablename__ = 'lianjia_transaction'
    # Table columns.
    id = Column(Integer, primary_key=True)
    transactiondate = Column(DateTime)
    price = Column(Float)
    avgPrice = Column(Float)
    ljID = Column(Float)
    address = Column(String(255))
    address1 = Column(String(15))
    address2 = Column(String(15))
    address3 = Column(String(15))
    address4 = Column(String(15))
    address5 = Column(String(15))
    address6 = Column(String(15))
    address7 = Column(String(15))
    address8 = Column(String(15))
    address9 = Column(String(15))
    address10 = Column(String(15))
    url = Column(String(500))

    # Whitelist of keys __init__ will copy from the incoming dict.
    # (A plain class attribute; the declarative metaclass only maps Columns.)
    _ASSIGNABLE_KEYS = frozenset((
        'id', 'transactiondate', 'price', 'avgPrice', 'ljID', 'address',
        'address1', 'address2', 'address3', 'address4', 'address5',
        'address6', 'address7', 'address8', 'address9', 'address10', 'url',
    ))

    def __init__(self, data):
        """Populate mapped columns from *data*, ignoring unknown keys."""
        for key, value in data.items():
            if key in self._ASSIGNABLE_KEYS:
                setattr(self, key, value)
| 32.41791 | 63 | 0.522099 |
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Integer, Float, DateTime
Base = declarative_base()
class lianjia_transaction(Base):
    """ORM model for one Lianjia housing-transaction record.

    ``__init__`` accepts a dict scraped from the site and copies only the
    keys that correspond to mapped columns, silently ignoring anything else
    (same behavior as the original chain of per-key ``if`` tests, without
    the 17-branch repetition).
    """
    # Table name.
    __tablename__ = 'lianjia_transaction'
    # Table columns.
    id = Column(Integer, primary_key=True)
    transactiondate = Column(DateTime)
    price = Column(Float)
    avgPrice = Column(Float)
    ljID = Column(Float)
    address = Column(String(255))
    address1 = Column(String(15))
    address2 = Column(String(15))
    address3 = Column(String(15))
    address4 = Column(String(15))
    address5 = Column(String(15))
    address6 = Column(String(15))
    address7 = Column(String(15))
    address8 = Column(String(15))
    address9 = Column(String(15))
    address10 = Column(String(15))
    url = Column(String(500))

    # Whitelist of keys __init__ will copy from the incoming dict.
    # (A plain class attribute; the declarative metaclass only maps Columns.)
    _ASSIGNABLE_KEYS = frozenset((
        'id', 'transactiondate', 'price', 'avgPrice', 'ljID', 'address',
        'address1', 'address2', 'address3', 'address4', 'address5',
        'address6', 'address7', 'address8', 'address9', 'address10', 'url',
    ))

    def __init__(self, data):
        """Populate mapped columns from *data*, ignoring unknown keys."""
        for key, value in data.items():
            if key in self._ASSIGNABLE_KEYS:
                setattr(self, key, value)
| true | true |
f73e6f052ed526a64258e377b569c0946315e4b4 | 1,858 | py | Python | python/code_challenges/graph/graph.py | dina-fouad/data-structures-and-algorithms | 53204f1c6d841ffe00849ff5f0fd0cd0469b6469 | [
"MIT"
] | null | null | null | python/code_challenges/graph/graph.py | dina-fouad/data-structures-and-algorithms | 53204f1c6d841ffe00849ff5f0fd0cd0469b6469 | [
"MIT"
] | 9 | 2021-08-01T19:29:35.000Z | 2021-09-05T19:58:03.000Z | python/code_challenges/graph/graph.py | dina-fouad/data-structures-and-algorithms | 53204f1c6d841ffe00849ff5f0fd0cd0469b6469 | [
"MIT"
] | null | null | null | from collections import deque
class Vertex:
    """Graph node wrapping an arbitrary payload value."""
    def __init__(self,value):
        self.value = value
class Edge:
    """Directed, weighted connection to a target Vertex."""
    def __init__(self,vertex,weight):
        self.vertex = vertex
        self.weight = weight
class Queue:
    """FIFO queue backed by collections.deque: items enter on the left
    and leave from the right, so the oldest item is dequeued first."""
    def __init__(self):
        self.dq = deque()
    def enqueue(self, value):
        # New items enter at the left end.
        self.dq.appendleft(value)
    def dequeue(self):
        # Oldest item leaves from the right end; IndexError when empty.
        return self.dq.pop()
    def __len__(self):
        return len(self.dq)
class Graph:
    """Directed, weighted graph stored as an adjacency list mapping each
    Vertex to a list of outgoing Edge objects."""

    def __init__(self):
        self._adjacency_list = {}

    def add_node(self, value):
        """Create a Vertex wrapping *value*, register it, and return it."""
        node = Vertex(value)
        self._adjacency_list[node] = []
        return node

    def size(self):
        """Number of vertices in the graph."""
        return len(self._adjacency_list)

    def add_edge(self, start_node, end_node, weight=1):
        """Add a directed edge start_node -> end_node with *weight*.

        Raises KeyError if either endpoint has not been added.

        Bug fixes vs. the previous version: it looked up
        ``self.adjacency_list`` (missing underscore), raising
        AttributeError on every call, and it appended bare
        ``(node, weight)`` tuples even though ``breadth_first_search``
        consumes ``Edge`` objects via ``edge.vertex``.
        """
        if start_node not in self._adjacency_list:
            raise KeyError('does not exist.')
        if end_node not in self._adjacency_list:
            raise KeyError('does not exist.')
        self._adjacency_list[start_node].append(Edge(end_node, weight))

    def get_nodes(self):
        """View of all vertices."""
        return self._adjacency_list.keys()

    def get_neighbors(self, vertex):
        """Outgoing Edge list for *vertex* ([] when unknown)."""
        return self._adjacency_list.get(vertex, [])

    def breadth_first_search(self, start_vertex, action=(lambda x: None)):
        """Visit every vertex reachable from *start_vertex* in BFS order,
        calling *action* exactly once per vertex."""
        queue = Queue()
        visited = set()
        queue.enqueue(start_vertex)
        visited.add(start_vertex)
        while len(queue):
            current_vertex = queue.dequeue()
            action(current_vertex)
            for edge in self.get_neighbors(current_vertex):
                neighbor_vertex = edge.vertex
                if neighbor_vertex not in visited:
                    visited.add(neighbor_vertex)
                    queue.enqueue(neighbor_vertex)
| 22.119048 | 74 | 0.597955 | from collections import deque
class Vertex:
    """Graph node wrapping an arbitrary payload value."""
    def __init__(self,value):
        self.value = value
class Edge:
    """Directed, weighted connection to a target Vertex."""
    def __init__(self,vertex,weight):
        self.vertex = vertex
        self.weight = weight
class Queue:
    """FIFO queue backed by collections.deque: items enter on the left
    and leave from the right, so the oldest item is dequeued first."""
    def __init__(self):
        self.dq = deque()
    def enqueue(self, value):
        # New items enter at the left end.
        self.dq.appendleft(value)
    def dequeue(self):
        # Oldest item leaves from the right end; IndexError when empty.
        return self.dq.pop()
    def __len__(self):
        return len(self.dq)
class Graph:
    """Directed, weighted graph stored as an adjacency list mapping each
    Vertex to a list of outgoing Edge objects."""

    def __init__(self):
        self._adjacency_list = {}

    def add_node(self, value):
        """Create a Vertex wrapping *value*, register it, and return it."""
        node = Vertex(value)
        self._adjacency_list[node] = []
        return node

    def size(self):
        """Number of vertices in the graph."""
        return len(self._adjacency_list)

    def add_edge(self, start_node, end_node, weight=1):
        """Add a directed edge start_node -> end_node with *weight*.

        Raises KeyError if either endpoint has not been added.

        Bug fixes vs. the previous version: it looked up
        ``self.adjacency_list`` (missing underscore), raising
        AttributeError on every call, and it appended bare
        ``(node, weight)`` tuples even though ``breadth_first_search``
        consumes ``Edge`` objects via ``edge.vertex``.
        """
        if start_node not in self._adjacency_list:
            raise KeyError('does not exist.')
        if end_node not in self._adjacency_list:
            raise KeyError('does not exist.')
        self._adjacency_list[start_node].append(Edge(end_node, weight))

    def get_nodes(self):
        """View of all vertices."""
        return self._adjacency_list.keys()

    def get_neighbors(self, vertex):
        """Outgoing Edge list for *vertex* ([] when unknown)."""
        return self._adjacency_list.get(vertex, [])

    def breadth_first_search(self, start_vertex, action=(lambda x: None)):
        """Visit every vertex reachable from *start_vertex* in BFS order,
        calling *action* exactly once per vertex."""
        queue = Queue()
        visited = set()
        queue.enqueue(start_vertex)
        visited.add(start_vertex)
        while len(queue):
            current_vertex = queue.dequeue()
            action(current_vertex)
            for edge in self.get_neighbors(current_vertex):
                neighbor_vertex = edge.vertex
                if neighbor_vertex not in visited:
                    visited.add(neighbor_vertex)
                    queue.enqueue(neighbor_vertex)
| true | true |
f73e6f20efb8f0b2e7cb2f404bada15ad5198f50 | 1,398 | py | Python | sphinx/conf.py | neuromodulation/py_neuromodulation | 1e8505d4324c9d2f37e5d56629a2ee418ea0b12b | [
"MIT"
] | 7 | 2021-05-12T02:13:12.000Z | 2022-02-28T13:14:23.000Z | sphinx/conf.py | neuromodulation/py_neuromodulation | 1e8505d4324c9d2f37e5d56629a2ee418ea0b12b | [
"MIT"
] | 98 | 2021-03-26T19:04:20.000Z | 2022-03-15T09:07:29.000Z | sphinx/conf.py | neuromodulation/py_neuromodulation | 1e8505d4324c9d2f37e5d56629a2ee418ea0b12b | [
"MIT"
] | 1 | 2021-07-16T10:39:01.000Z | 2021-07-16T10:39:01.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
import sphinx_rtd_theme
import sys
import os
# Debug output so build logs show where Sphinx resolves paths from.
print("CURRENT WORKING DIRECTORY")
print(os.getcwd())
print('adding path')
# NOTE(review): hardcoded absolute Windows path — this only works on one
# machine; presumably it should be derived from __file__. TODO confirm.
sys.path.insert(0, r'C:\Users\ICN_admin\Documents\py_neuromodulation\pyneuromodulation')
print(sys.path)
# At top on conf.py (with other import statements)
import recommonmark
from recommonmark.transform import AutoStructify
from recommonmark.parser import CommonMarkParser
# -- Project information -----------------------------------------------------
project = 'py_neuromodulation'
copyright = '2021, Timon Merk'
author = 'Timon Merk'
# Route Markdown sources through the recommonmark parser.
source_parsers = {
    '.md': 'recommonmark.parser.CommonMarkParser',
}
# Sphinx extensions used to build these docs.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
    'sphinx.ext.mathjax',
    'numpydoc',
    'sphinx_rtd_theme',
    'sphinx.ext.napoleon',
    'sphinx.ext.autosectionlabel',
    'nbsphinx',
    'recommonmark'
]
# File types treated as documentation sources.
source_suffix = ['.rst', '.md', '.ipynb']
# Generate autosummary stub pages automatically.
autosummary_generate = True
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
| 27.96 | 88 | 0.674535 |
# Sphinx configuration for the py_neuromodulation documentation build.
import sphinx_rtd_theme
import sys
import os
# Debug output so build logs show where Sphinx resolves paths from.
print("CURRENT WORKING DIRECTORY")
print(os.getcwd())
print('adding path')
# NOTE(review): hardcoded absolute Windows path — this only works on one
# machine; presumably it should be derived from __file__. TODO confirm.
sys.path.insert(0, r'C:\Users\ICN_admin\Documents\py_neuromodulation\pyneuromodulation')
print(sys.path)
# Markdown support via recommonmark.
import recommonmark
from recommonmark.transform import AutoStructify
from recommonmark.parser import CommonMarkParser
# -- Project information ------------------------------------------------------
project = 'py_neuromodulation'
copyright = '2021, Timon Merk'
author = 'Timon Merk'
# Route Markdown sources through the recommonmark parser.
source_parsers = {
    '.md': 'recommonmark.parser.CommonMarkParser',
}
# Sphinx extensions used to build these docs.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
    'sphinx.ext.mathjax',
    'numpydoc',
    'sphinx_rtd_theme',
    'sphinx.ext.napoleon',
    'sphinx.ext.autosectionlabel',
    'nbsphinx',
    'recommonmark'
]
# File types treated as documentation sources.
source_suffix = ['.rst', '.md', '.ipynb']
# Generate autosummary stub pages automatically.
autosummary_generate = True
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
| true | true |
f73e6f7bc827d46b3a0a2b24106e5730a942c336 | 889 | py | Python | module3/before/face_detect.py | axel-sirota/build-face-recognition-azure | f9a4e2d721307bf9cb53a36ca82fdfa0e651fc86 | [
"MIT"
] | 1 | 2022-03-19T15:09:22.000Z | 2022-03-19T15:09:22.000Z | module3/before/face_detect.py | axel-sirota/build-face-recognition-azure | f9a4e2d721307bf9cb53a36ca82fdfa0e651fc86 | [
"MIT"
] | null | null | null | module3/before/face_detect.py | axel-sirota/build-face-recognition-azure | f9a4e2d721307bf9cb53a36ca82fdfa0e651fc86 | [
"MIT"
] | 1 | 2022-03-19T15:09:24.000Z | 2022-03-19T15:09:24.000Z | from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
from pprint import pprint
import os
# Do not worry about this function, it is for pretty printing the attributes!
def pretty_print(klass, indent=0):
    """Recursively dump an object's instance attributes as an indented tree.

    Prints the object's class name, then each attribute one level (4 spaces)
    deeper; attributes that are themselves objects (or lists of objects) are
    expanded recursively.
    """
    pad = ' ' * indent
    print(pad + type(klass).__name__ + ':')
    child_indent = indent + 4
    for name, attr in klass.__dict__.items():
        if '__dict__' in dir(attr):
            # Nested object with its own attributes: recurse one level deeper.
            pretty_print(attr, child_indent)
        elif isinstance(attr, list):
            for element in attr:
                pretty_print(element, child_indent)
        else:
            print(' ' * child_indent + name + ': ' + str(attr))
# Authenticate
# Credentials come from the environment so they never live in source control;
# KeyError here means the variables are not set.
subscription_key = os.environ["AZURE_COMPUTER_VISION_SUBSCRIPTION_KEY"]
endpoint = os.environ["AZURE_COMPUTER_VISION_ENDPOINT"]
# Publicly hosted sample image used for the face-detection demo.
face_image_url = 'https://raw.githubusercontent.com/axel-sirota/build-face-recognition-azure/main/images/business.jpeg'
| 31.75 | 119 | 0.691789 | from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
from pprint import pprint
import os
def pretty_print(klass, indent=0):
    """Recursively print an object's attribute tree, indenting 4 spaces per level."""
    print(' ' * indent + type(klass).__name__ + ':')
    indent += 4
    for k, v in klass.__dict__.items():
        if '__dict__' in dir(v):
            # Nested object with its own attributes: recurse.
            pretty_print(v, indent)
        elif isinstance(v, list):
            # Expand each list element as its own subtree.
            for item in v:
                pretty_print(item, indent)
        else:
            print(' ' * indent + k + ': ' + str(v))
# Credentials come from the environment so they never live in source control;
# KeyError here means the variables are not set.
subscription_key = os.environ["AZURE_COMPUTER_VISION_SUBSCRIPTION_KEY"]
endpoint = os.environ["AZURE_COMPUTER_VISION_ENDPOINT"]
# Publicly hosted sample image used for the face-detection demo.
face_image_url = 'https://raw.githubusercontent.com/axel-sirota/build-face-recognition-azure/main/images/business.jpeg'
| true | true |
f73e6fee70ce0b595fae730ecdc4cabd05536fb3 | 11,783 | py | Python | Lib/test/test_named_expressions.py | dhdavvie/cpython | c9345e382c630ddcc2b148b30954640e0e435c8a | [
"CNRI-Python-GPL-Compatible"
] | 2 | 2020-06-09T03:53:50.000Z | 2020-06-09T03:53:53.000Z | Lib/test/test_named_expressions.py | anuragkumarak95/cpython | d309352c6fd93a51f2b3011ca8c2125d3a5d394b | [
"CNRI-Python-GPL-Compatible"
] | 3 | 2020-03-15T21:17:00.000Z | 2020-03-15T22:50:40.000Z | Lib/test/test_named_expressions.py | anuragkumarak95/cpython | d309352c6fd93a51f2b3011ca8c2125d3a5d394b | [
"CNRI-Python-GPL-Compatible"
] | 2 | 2017-11-22T23:32:25.000Z | 2018-11-15T01:31:39.000Z | import os
import unittest
class NamedExpressionInvalidTest(unittest.TestCase):
    """Check that misplaced assignment expressions are rejected at compile time.

    Each test compiles a snippet via exec() and asserts the expected
    SyntaxError (or TargetScopeError) message.
    """
    def test_named_expression_invalid_01(self):
        # ":=" is not allowed as a bare top-level statement.
        code = """x := 0"""
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_02(self):
        code = """x = y := 0"""
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_03(self):
        code = """y := f(x)"""
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_04(self):
        code = """y0 = y1 := f(x)"""
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    # NOTE(review): there is no test_named_expression_invalid_05.
    def test_named_expression_invalid_06(self):
        # The walrus target must be a simple name, never a tuple.
        code = """((a, b) := (1, 2))"""
        with self.assertRaisesRegex(SyntaxError, "cannot use named assignment with tuple"):
            exec(code, {}, {})
    def test_named_expression_invalid_07(self):
        code = """def spam(a = b := 42): pass"""
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_08(self):
        code = """def spam(a: b := 42 = 5): pass"""
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_09(self):
        code = """spam(a=b := 'c')"""
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_10(self):
        code = """spam(x = y := f(x))"""
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_11(self):
        code = """spam(a=1, b := 2)"""
        with self.assertRaisesRegex(SyntaxError,
            "positional argument follows keyword argument"):
            exec(code, {}, {})
    def test_named_expression_invalid_12(self):
        code = """spam(a=1, (b := 2))"""
        with self.assertRaisesRegex(SyntaxError,
            "positional argument follows keyword argument"):
            exec(code, {}, {})
    def test_named_expression_invalid_13(self):
        # NOTE(review): byte-identical to test_named_expression_invalid_12 —
        # looks like a copy/paste duplicate; presumably a distinct snippet
        # was intended here.
        code = """spam(a=1, (b := 2))"""
        with self.assertRaisesRegex(SyntaxError,
            "positional argument follows keyword argument"):
            exec(code, {}, {})
    def test_named_expression_invalid_14(self):
        code = """(x := lambda: y := 1)"""
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_15(self):
        code = """(lambda: x := 1)"""
        with self.assertRaisesRegex(SyntaxError,
            "cannot use named assignment with lambda"):
            exec(code, {}, {})
    def test_named_expression_invalid_16(self):
        code = "[i + 1 for i in i := [1,2]]"
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_17(self):
        code = "[i := 0, j := 1 for i, j in [(1, 2), (3, 4)]]"
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_18(self):
        # NOTE(review): TargetScopeError only exists in early 3.8 previews
        # (it was later folded back into SyntaxError) — confirm the target
        # interpreter version before running this.
        code = """class Foo():
            [(42, 1 + ((( j := i )))) for i in range(5)]
        """
        with self.assertRaisesRegex(TargetScopeError,
            "named expression within a comprehension cannot be used in a class body"):
            exec(code, {}, {})
class NamedExpressionAssignmentTest(unittest.TestCase):
    """Verify the value produced by ``:=`` and the binding it creates in
    expressions, conditions, comprehensions, and loops."""
    def test_named_expression_assignment_01(self):
        (a := 10)
        self.assertEqual(a, 10)
    def test_named_expression_assignment_02(self):
        a = 20
        (a := a)
        self.assertEqual(a, 20)
    def test_named_expression_assignment_03(self):
        (total := 1 + 2)
        self.assertEqual(total, 3)
    def test_named_expression_assignment_04(self):
        (info := (1, 2, 3))
        self.assertEqual(info, (1, 2, 3))
    def test_named_expression_assignment_05(self):
        # ":=" binds tighter than the comma: x gets 1, not the tuple.
        (x := 1, 2)
        self.assertEqual(x, 1)
    def test_named_expression_assignment_06(self):
        # Nested walrus expressions chain the same value outward.
        (z := (y := (x := 0)))
        self.assertEqual(x, 0)
        self.assertEqual(y, 0)
        self.assertEqual(z, 0)
    def test_named_expression_assignment_07(self):
        (loc := (1, 2))
        self.assertEqual(loc, (1, 2))
    def test_named_expression_assignment_08(self):
        if spam := "eggs":
            self.assertEqual(spam, "eggs")
        else: self.fail("variable was not assigned using named expression")
    def test_named_expression_assignment_09(self):
        if True and (spam := True):
            self.assertTrue(spam)
        else: self.fail("variable was not assigned using named expression")
    def test_named_expression_assignment_10(self):
        if (match := 10) == 10:
            pass
        else: self.fail("variable was not assigned using named expression")
    def test_named_expression_assignment_11(self):
        def spam(a):
            return a
        # Walrus inside a comprehension filter is visible in the output expr.
        input_data = [1, 2, 3]
        res = [(x, y, x/y) for x in input_data if (y := spam(x)) > 0]
        self.assertEqual(res, [(1, 1, 1.0), (2, 2, 1.0), (3, 3, 1.0)])
    def test_named_expression_assignment_12(self):
        def spam(a):
            return a
        res = [[y := spam(x), x/y] for x in range(1, 5)]
        self.assertEqual(res, [[1, 1.0], [2, 1.0], [3, 1.0], [4, 1.0]])
    def test_named_expression_assignment_13(self):
        # Walrus inside a call argument binds in the enclosing scope.
        length = len(lines := [1, 2])
        self.assertEqual(length, 2)
        self.assertEqual(lines, [1,2])
    def test_named_expression_assignment_14(self):
        """
        Where all variables are positive integers, and a is at least as large
        as the n'th root of x, this algorithm returns the floor of the n'th
        root of x (and roughly doubling the number of accurate bits per
        iteration):
        """
        a = 9
        n = 2
        x = 3
        while a > (d := x // a**(n-1)):
            a = ((n-1)*a + d) // n
        self.assertEqual(a, 1)
    def test_named_expression_assignment_15(self):
        # The walrus still binds even when the loop body never executes.
        while a := False:
            pass  # This will not run
        self.assertEqual(a, False)
    def test_named_expression_assignment_16(self):
        a, b = 1, 2
        # Fibonacci via walrus rebinding inside a dict comprehension.
        fib = {(c := a): (a := b) + (b := a + c) - b for __ in range(6)}
        self.assertEqual(fib, {1: 2, 2: 3, 3: 5, 5: 8, 8: 13, 13: 21})
class NamedExpressionScopeTest(unittest.TestCase):
    """Verify where names bound by ``:=`` land: bindings made inside a
    comprehension leak into the enclosing function scope (PEP 572), and
    global/nonlocal declarations are honored."""
    def test_named_expression_scope_01(self):
        # A walrus binding inside a function body stays local to it.
        code = """def spam():
            (a := 5)
        print(a)"""
        with self.assertRaisesRegex(NameError, "name 'a' is not defined"):
            exec(code, {}, {})
    def test_named_expression_scope_02(self):
        total = 0
        partial_sums = [total := total + v for v in range(5)]
        self.assertEqual(partial_sums, [0, 1, 3, 6, 10])
        self.assertEqual(total, 10)
    def test_named_expression_scope_03(self):
        # Binding made inside a generator arg to any() is still visible after.
        containsOne = any((lastNum := num) == 1 for num in [1, 2, 3])
        self.assertTrue(containsOne)
        self.assertEqual(lastNum, 1)
    def test_named_expression_scope_04(self):
        def spam(a):
            return a
        res = [[y := spam(x), x/y] for x in range(1, 5)]
        self.assertEqual(y, 4)
    def test_named_expression_scope_05(self):
        def spam(a):
            return a
        input_data = [1, 2, 3]
        res = [(x, y, x/y) for x in input_data if (y := spam(x)) > 0]
        self.assertEqual(res, [(1, 1, 1.0), (2, 2, 1.0), (3, 3, 1.0)])
        self.assertEqual(y, 3)
    def test_named_expression_scope_06(self):
        # The inner comprehension's walrus escapes both comprehensions.
        res = [[spam := i for i in range(3)] for j in range(2)]
        self.assertEqual(res, [[0, 1, 2], [0, 1, 2]])
        self.assertEqual(spam, 2)
    def test_named_expression_scope_07(self):
        len(lines := [1, 2])
        self.assertEqual(lines, [1, 2])
    def test_named_expression_scope_08(self):
        def spam(a):
            return a
        def eggs(b):
            return b * 2
        res = [spam(a := eggs(b := h)) for h in range(2)]
        self.assertEqual(res, [0, 2])
        self.assertEqual(a, 2)
        self.assertEqual(b, 1)
    def test_named_expression_scope_09(self):
        def spam(a):
            return a
        def eggs(b):
            return b * 2
        res = [spam(a := eggs(a := h)) for h in range(2)]
        self.assertEqual(res, [0, 2])
        self.assertEqual(a, 2)
    def test_named_expression_scope_10(self):
        res = [b := [a := 1 for i in range(2)] for j in range(2)]
        self.assertEqual(res, [[1, 1], [1, 1]])
        self.assertEqual(a, 1)
        self.assertEqual(b, [1, 1])
    def test_named_expression_scope_11(self):
        res = [j := i for i in range(5)]
        self.assertEqual(res, [0, 1, 2, 3, 4])
        self.assertEqual(j, 4)
    def test_named_expression_scope_12(self):
        res = [i := i for i in range(5)]
        self.assertEqual(res, [0, 1, 2, 3, 4])
        self.assertEqual(i, 4)
    def test_named_expression_scope_13(self):
        res = [i := 0 for i, j in [(1, 2), (3, 4)]]
        self.assertEqual(res, [0, 0])
        self.assertEqual(i, 0)
    def test_named_expression_scope_14(self):
        res = [(i := 0, j := 1) for i, j in [(1, 2), (3, 4)]]
        self.assertEqual(res, [(0, 1), (0, 1)])
        self.assertEqual(i, 0)
        self.assertEqual(j, 1)
    def test_named_expression_scope_15(self):
        res = [(i := i, j := j) for i, j in [(1, 2), (3, 4)]]
        self.assertEqual(res, [(1, 2), (3, 4)])
        self.assertEqual(i, 3)
        self.assertEqual(j, 4)
    def test_named_expression_scope_16(self):
        res = [(i := j, j := i) for i, j in [(1, 2), (3, 4)]]
        self.assertEqual(res, [(2, 2), (4, 4)])
        self.assertEqual(i, 4)
        self.assertEqual(j, 4)
    def test_named_expression_scope_17(self):
        b = 0
        res = [b := i + b for i in range(5)]
        self.assertEqual(res, [0, 1, 3, 6, 10])
        self.assertEqual(b, 10)
    def test_named_expression_scope_18(self):
        def spam(a):
            return a
        res = spam(b := 2)
        self.assertEqual(res, 2)
        self.assertEqual(b, 2)
    def test_named_expression_scope_19(self):
        def spam(a):
            return a
        res = spam((b := 2))
        self.assertEqual(res, 2)
        self.assertEqual(b, 2)
    def test_named_expression_scope_20(self):
        def spam(a):
            return a
        res = spam(a=(b := 2))
        self.assertEqual(res, 2)
        self.assertEqual(b, 2)
    def test_named_expression_scope_21(self):
        def spam(a, b):
            return a + b
        res = spam(c := 2, b=1)
        self.assertEqual(res, 3)
        self.assertEqual(c, 2)
    def test_named_expression_scope_22(self):
        def spam(a, b):
            return a + b
        res = spam((c := 2), b=1)
        self.assertEqual(res, 3)
        self.assertEqual(c, 2)
    def test_named_expression_scope_23(self):
        def spam(a, b):
            return a + b
        res = spam(b=(c := 2), a=1)
        self.assertEqual(res, 3)
        self.assertEqual(c, 2)
    def test_named_expression_scope_24(self):
        # A walrus respects an explicit nonlocal declaration.
        a = 10
        def spam():
            nonlocal a
            (a := 20)
        spam()
        self.assertEqual(a, 20)
    def test_named_expression_scope_25(self):
        # A walrus respects an explicit global declaration.
        ns = {}
        code = """a = 10
def spam():
    global a
    (a := 20)
spam()"""
        exec(code, ns, {})
        self.assertEqual(ns["a"], 20)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 27.594848 | 91 | 0.55614 | import os
import unittest
class NamedExpressionInvalidTest(unittest.TestCase):
    """Check that misplaced assignment expressions are rejected at compile time.

    Each test compiles a snippet via exec() and asserts the expected
    SyntaxError (or TargetScopeError) message.
    """
    def test_named_expression_invalid_01(self):
        # ":=" is not allowed as a bare top-level statement.
        code = """x := 0"""
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_02(self):
        code = """x = y := 0"""
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_03(self):
        code = """y := f(x)"""
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_04(self):
        code = """y0 = y1 := f(x)"""
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    # NOTE(review): there is no test_named_expression_invalid_05.
    def test_named_expression_invalid_06(self):
        # The walrus target must be a simple name, never a tuple.
        code = """((a, b) := (1, 2))"""
        with self.assertRaisesRegex(SyntaxError, "cannot use named assignment with tuple"):
            exec(code, {}, {})
    def test_named_expression_invalid_07(self):
        code = """def spam(a = b := 42): pass"""
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_08(self):
        code = """def spam(a: b := 42 = 5): pass"""
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_09(self):
        code = """spam(a=b := 'c')"""
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_10(self):
        code = """spam(x = y := f(x))"""
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_11(self):
        code = """spam(a=1, b := 2)"""
        with self.assertRaisesRegex(SyntaxError,
            "positional argument follows keyword argument"):
            exec(code, {}, {})
    def test_named_expression_invalid_12(self):
        code = """spam(a=1, (b := 2))"""
        with self.assertRaisesRegex(SyntaxError,
            "positional argument follows keyword argument"):
            exec(code, {}, {})
    def test_named_expression_invalid_13(self):
        # NOTE(review): byte-identical to test_named_expression_invalid_12 —
        # looks like a copy/paste duplicate; presumably a distinct snippet
        # was intended here.
        code = """spam(a=1, (b := 2))"""
        with self.assertRaisesRegex(SyntaxError,
            "positional argument follows keyword argument"):
            exec(code, {}, {})
    def test_named_expression_invalid_14(self):
        code = """(x := lambda: y := 1)"""
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_15(self):
        code = """(lambda: x := 1)"""
        with self.assertRaisesRegex(SyntaxError,
            "cannot use named assignment with lambda"):
            exec(code, {}, {})
    def test_named_expression_invalid_16(self):
        code = "[i + 1 for i in i := [1,2]]"
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_17(self):
        code = "[i := 0, j := 1 for i, j in [(1, 2), (3, 4)]]"
        with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
            exec(code, {}, {})
    def test_named_expression_invalid_18(self):
        # NOTE(review): TargetScopeError only exists in early 3.8 previews
        # (it was later folded back into SyntaxError) — confirm the target
        # interpreter version before running this.
        code = """class Foo():
            [(42, 1 + ((( j := i )))) for i in range(5)]
        """
        with self.assertRaisesRegex(TargetScopeError,
            "named expression within a comprehension cannot be used in a class body"):
            exec(code, {}, {})
class NamedExpressionAssignmentTest(unittest.TestCase):
    """Verify the value produced by ``:=`` and the binding it creates in
    expressions, conditions, comprehensions, and loops."""
    def test_named_expression_assignment_01(self):
        (a := 10)
        self.assertEqual(a, 10)
    def test_named_expression_assignment_02(self):
        a = 20
        (a := a)
        self.assertEqual(a, 20)
    def test_named_expression_assignment_03(self):
        (total := 1 + 2)
        self.assertEqual(total, 3)
    def test_named_expression_assignment_04(self):
        (info := (1, 2, 3))
        self.assertEqual(info, (1, 2, 3))
    def test_named_expression_assignment_05(self):
        # ":=" binds tighter than the comma: x gets 1, not the tuple.
        (x := 1, 2)
        self.assertEqual(x, 1)
    def test_named_expression_assignment_06(self):
        # Nested walrus expressions chain the same value outward.
        (z := (y := (x := 0)))
        self.assertEqual(x, 0)
        self.assertEqual(y, 0)
        self.assertEqual(z, 0)
    def test_named_expression_assignment_07(self):
        (loc := (1, 2))
        self.assertEqual(loc, (1, 2))
    def test_named_expression_assignment_08(self):
        if spam := "eggs":
            self.assertEqual(spam, "eggs")
        else: self.fail("variable was not assigned using named expression")
    def test_named_expression_assignment_09(self):
        if True and (spam := True):
            self.assertTrue(spam)
        else: self.fail("variable was not assigned using named expression")
    def test_named_expression_assignment_10(self):
        if (match := 10) == 10:
            pass
        else: self.fail("variable was not assigned using named expression")
    def test_named_expression_assignment_11(self):
        def spam(a):
            return a
        # Walrus inside a comprehension filter is visible in the output expr.
        input_data = [1, 2, 3]
        res = [(x, y, x/y) for x in input_data if (y := spam(x)) > 0]
        self.assertEqual(res, [(1, 1, 1.0), (2, 2, 1.0), (3, 3, 1.0)])
    def test_named_expression_assignment_12(self):
        def spam(a):
            return a
        res = [[y := spam(x), x/y] for x in range(1, 5)]
        self.assertEqual(res, [[1, 1.0], [2, 1.0], [3, 1.0], [4, 1.0]])
    def test_named_expression_assignment_13(self):
        # Walrus inside a call argument binds in the enclosing scope.
        length = len(lines := [1, 2])
        self.assertEqual(length, 2)
        self.assertEqual(lines, [1,2])
    def test_named_expression_assignment_14(self):
        """
        Where all variables are positive integers, and a is at least as large
        as the n'th root of x, this algorithm returns the floor of the n'th
        root of x (and roughly doubling the number of accurate bits per
        iteration):
        """
        a = 9
        n = 2
        x = 3
        while a > (d := x // a**(n-1)):
            a = ((n-1)*a + d) // n
        self.assertEqual(a, 1)
    def test_named_expression_assignment_15(self):
        # The walrus still binds even when the loop body never executes.
        while a := False:
            pass  # This will not run
        self.assertEqual(a, False)
    def test_named_expression_assignment_16(self):
        a, b = 1, 2
        # Fibonacci via walrus rebinding inside a dict comprehension.
        fib = {(c := a): (a := b) + (b := a + c) - b for __ in range(6)}
        self.assertEqual(fib, {1: 2, 2: 3, 3: 5, 5: 8, 8: 13, 13: 21})
class NamedExpressionScopeTest(unittest.TestCase):
def test_named_expression_scope_01(self):
code = """def spam():
(a := 5)
print(a)"""
with self.assertRaisesRegex(NameError, "name 'a' is not defined"):
exec(code, {}, {})
def test_named_expression_scope_02(self):
total = 0
partial_sums = [total := total + v for v in range(5)]
self.assertEqual(partial_sums, [0, 1, 3, 6, 10])
self.assertEqual(total, 10)
def test_named_expression_scope_03(self):
containsOne = any((lastNum := num) == 1 for num in [1, 2, 3])
self.assertTrue(containsOne)
self.assertEqual(lastNum, 1)
def test_named_expression_scope_04(self):
def spam(a):
return a
res = [[y := spam(x), x/y] for x in range(1, 5)]
self.assertEqual(y, 4)
def test_named_expression_scope_05(self):
def spam(a):
return a
input_data = [1, 2, 3]
res = [(x, y, x/y) for x in input_data if (y := spam(x)) > 0]
self.assertEqual(res, [(1, 1, 1.0), (2, 2, 1.0), (3, 3, 1.0)])
self.assertEqual(y, 3)
def test_named_expression_scope_06(self):
res = [[spam := i for i in range(3)] for j in range(2)]
self.assertEqual(res, [[0, 1, 2], [0, 1, 2]])
self.assertEqual(spam, 2)
def test_named_expression_scope_07(self):
len(lines := [1, 2])
self.assertEqual(lines, [1, 2])
def test_named_expression_scope_08(self):
def spam(a):
return a
def eggs(b):
return b * 2
res = [spam(a := eggs(b := h)) for h in range(2)]
self.assertEqual(res, [0, 2])
self.assertEqual(a, 2)
self.assertEqual(b, 1)
def test_named_expression_scope_09(self):
def spam(a):
return a
def eggs(b):
return b * 2
res = [spam(a := eggs(a := h)) for h in range(2)]
self.assertEqual(res, [0, 2])
self.assertEqual(a, 2)
def test_named_expression_scope_10(self):
res = [b := [a := 1 for i in range(2)] for j in range(2)]
self.assertEqual(res, [[1, 1], [1, 1]])
self.assertEqual(a, 1)
self.assertEqual(b, [1, 1])
def test_named_expression_scope_11(self):
res = [j := i for i in range(5)]
self.assertEqual(res, [0, 1, 2, 3, 4])
self.assertEqual(j, 4)
def test_named_expression_scope_12(self):
res = [i := i for i in range(5)]
self.assertEqual(res, [0, 1, 2, 3, 4])
self.assertEqual(i, 4)
def test_named_expression_scope_13(self):
res = [i := 0 for i, j in [(1, 2), (3, 4)]]
self.assertEqual(res, [0, 0])
self.assertEqual(i, 0)
def test_named_expression_scope_14(self):
res = [(i := 0, j := 1) for i, j in [(1, 2), (3, 4)]]
self.assertEqual(res, [(0, 1), (0, 1)])
self.assertEqual(i, 0)
self.assertEqual(j, 1)
def test_named_expression_scope_15(self):
res = [(i := i, j := j) for i, j in [(1, 2), (3, 4)]]
self.assertEqual(res, [(1, 2), (3, 4)])
self.assertEqual(i, 3)
self.assertEqual(j, 4)
def test_named_expression_scope_16(self):
res = [(i := j, j := i) for i, j in [(1, 2), (3, 4)]]
self.assertEqual(res, [(2, 2), (4, 4)])
self.assertEqual(i, 4)
self.assertEqual(j, 4)
def test_named_expression_scope_17(self):
b = 0
res = [b := i + b for i in range(5)]
self.assertEqual(res, [0, 1, 3, 6, 10])
self.assertEqual(b, 10)
def test_named_expression_scope_18(self):
def spam(a):
return a
res = spam(b := 2)
self.assertEqual(res, 2)
self.assertEqual(b, 2)
def test_named_expression_scope_19(self):
def spam(a):
return a
res = spam((b := 2))
self.assertEqual(res, 2)
self.assertEqual(b, 2)
def test_named_expression_scope_20(self):
def spam(a):
return a
res = spam(a=(b := 2))
self.assertEqual(res, 2)
self.assertEqual(b, 2)
def test_named_expression_scope_21(self):
def spam(a, b):
return a + b
res = spam(c := 2, b=1)
self.assertEqual(res, 3)
self.assertEqual(c, 2)
def test_named_expression_scope_22(self):
def spam(a, b):
return a + b
res = spam((c := 2), b=1)
self.assertEqual(res, 3)
self.assertEqual(c, 2)
def test_named_expression_scope_23(self):
def spam(a, b):
return a + b
res = spam(b=(c := 2), a=1)
self.assertEqual(res, 3)
self.assertEqual(c, 2)
def test_named_expression_scope_24(self):
a = 10
def spam():
nonlocal a
(a := 20)
spam()
self.assertEqual(a, 20)
def test_named_expression_scope_25(self):
ns = {}
code = """a = 10
def spam():
global a
(a := 20)
spam()"""
exec(code, ns, {})
self.assertEqual(ns["a"], 20)
if __name__ == "__main__":
unittest.main()
| true | true |
f73e7006359c26e3644c0d15ff717fc875f2b778 | 88 | py | Python | helpers/__init__.py | r-luo/QA-Summary | 219dbbd306a85f73c126ad73ef7421c5450afbfc | [
"MIT"
] | null | null | null | helpers/__init__.py | r-luo/QA-Summary | 219dbbd306a85f73c126ad73ef7421c5450afbfc | [
"MIT"
] | null | null | null | helpers/__init__.py | r-luo/QA-Summary | 219dbbd306a85f73c126ad73ef7421c5450afbfc | [
"MIT"
] | null | null | null | from . import utils
from . import data_prep
from . import seq2seq
from . import word2vec | 22 | 23 | 0.784091 | from . import utils
from . import data_prep
from . import seq2seq
from . import word2vec | true | true |
f73e70a84f6c6fc4600a9dde440c4bedad44e5ee | 2,972 | py | Python | modules/nosferatu/reaction_message.py | tellurion-code/tellurion-bot-py | 0b0d580f4479d814ac6239a6bea07ee4f2a0ab70 | [
"MIT"
] | 9 | 2018-05-06T15:52:45.000Z | 2021-07-14T12:06:46.000Z | modules/nosferatu/reaction_message.py | epenserdiscord/epenser-bot-py | a225bc5ab505d1bde9fe153c44e34554588fe238 | [
"MIT"
] | 4 | 2018-06-13T07:21:53.000Z | 2021-06-11T20:49:18.000Z | modules/nosferatu/reaction_message.py | tellurion-code/tellurion-bot-py | 0b0d580f4479d814ac6239a6bea07ee4f2a0ab70 | [
"MIT"
] | 12 | 2018-06-25T18:29:03.000Z | 2021-06-10T06:26:32.000Z | import discord
import modules.nosferatu.globals as globals
class ReactionMessage:
    """A Discord message whose reactions encode a multiple-choice selection.

    Users pick options by reacting with number emojis. Once the condition
    callback ``_cond`` is satisfied, a check-mark reaction is added; clicking
    it confirms the selection and triggers the ``_effect`` callback.
    """

    def __init__(self, _cond, _effect, **kwargs):
        # Optional per-reaction filter; accepts every (reaction, user) pair
        # by default.
        self.check = kwargs.get("check", lambda r, u: True)
        # Optional callback invoked whenever the recorded reactions change.
        self.update_function = kwargs.get("update")
        # When True (default), the message is deleted once the choice is
        # confirmed.
        self.temporary = kwargs.get("temporary", True)
        self.cond = _cond
        self.effect = _effect
        # Maps user id -> list of selected choice indices.
        self.reactions = {}

    async def send(self, _channel, _title, _description, _color, _choices):
        """Send the choice embed and pre-add one number reaction per choice.

        Raises ValueError when ``_choices`` is empty.
        """
        if len(_choices) == 0:
            # BUG FIX: the original raised a plain str, which is itself a
            # TypeError in Python 3; raise a real exception instead.
            raise ValueError("Le nombre de choix doit être supérieur à 0")
        embed = discord.Embed(
            title=_title,
            description=_description,
            color=_color
        )
        # One number emoji per choice, plus the confirmation check mark at
        # the end (index len(_choices)).
        self.number_emojis = globals.number_emojis[:len(_choices)]
        self.number_emojis.append("✅")
        for index, choice in enumerate(_choices):
            embed.description += self.number_emojis[index] + " " + choice + "\n"
        self.message = await _channel.send(embed=embed)
        for index in range(len(_choices)):
            await self.message.add_reaction(self.number_emojis[index])
        globals.reaction_messages.append(self)

    async def add_reaction(self, reaction, user):
        """Record a reaction added by ``user``; confirm the choice on ✅."""
        if user.id in self.reactions:
            if reaction.emoji != "✅":
                self.reactions[user.id].append(self.number_emojis.index(reaction.emoji))
        else:
            self.reactions[user.id] = [self.number_emojis.index(reaction.emoji)]
        if reaction.emoji == "✅" and await self.cond(self.reactions):
            # Condition satisfied and confirmed: fire the effect, then clean
            # up the message if it was marked temporary.
            await self.effect(self.reactions)
            if self.temporary:
                await self.message.delete()
                globals.reaction_messages.remove(self)
        else:
            await self.update(reaction)

    async def remove_reaction(self, reaction, user):
        """Forget a reaction removed by ``user`` and refresh the ✅ state."""
        if user.id in self.reactions:
            emoji_index = self.number_emojis.index(reaction.emoji)
            if emoji_index in self.reactions[user.id]:
                self.reactions[user.id].remove(emoji_index)
        await self.update(reaction)

    async def update(self, reaction):
        """Re-evaluate the condition and toggle the ✅ reaction accordingly."""
        if self.update_function:
            await self.update_function(self.reactions)
        if await self.cond(self.reactions):
            await self.message.add_reaction("✅")
        else:
            # Direct messages have no guild; fall back to the channel's bot
            # member to remove our own reaction.
            if reaction.message.guild:
                await self.message.remove_reaction("✅", reaction.message.guild.me)
            else:
                await self.message.remove_reaction("✅", reaction.message.channel.me)
| 35.807229 | 94 | 0.61642 | import discord
import modules.nosferatu.globals as globals
class ReactionMessage:
def __init__(self, _cond, _effect, **kwargs):
self.check = kwargs["check"] if "check" in kwargs else lambda r, u: True
self.update_function = kwargs["update"] if "update" in kwargs else None
self.temporary = kwargs["temporary"] if "temporary" in kwargs else True
self.cond = _cond
self.effect = _effect
self.reactions = {}
async def send(self, _channel, _title, _description, _color, _choices):
if len(_choices) == 0:
raise "Le nombre de choix doit être supérieur à 0"
embed = discord.Embed(
title = _title,
description = _description,
color = _color
)
self.number_emojis = globals.number_emojis[:len(_choices)]
self.number_emojis.append("✅")
i = 0
for choice in _choices:
embed.description += self.number_emojis[i] + " " + choice + "\n"
i += 1
self.message = await _channel.send(embed = embed)
for i in range(len(_choices)):
await self.message.add_reaction(self.number_emojis[i])
globals.reaction_messages.append(self)
async def add_reaction(self, reaction, user):
print("add " + str(user.id))
if user.id in self.reactions:
if reaction.emoji != "✅":
self.reactions[user.id].append(self.number_emojis.index(reaction.emoji))
else:
self.reactions[user.id] = [self.number_emojis.index(reaction.emoji)]
if reaction.emoji == "✅" and await self.cond(self.reactions):
await self.effect(self.reactions)
if self.temporary:
await self.message.delete()
globals.reaction_messages.remove(self)
else:
await self.update(reaction)
async def remove_reaction(self, reaction, user):
print("remove " + str(user.id))
if user.id in self.reactions:
if self.number_emojis.index(reaction.emoji) in self.reactions[user.id]:
self.reactions[user.id].remove(self.number_emojis.index(reaction.emoji))
await self.update(reaction)
async def update(self, reaction):
print("UPDATE")
print(self.reactions)
if self.update_function:
await self.update_function(self.reactions)
if await self.cond(self.reactions):
print("Try and add")
await self.message.add_reaction("✅")
else:
if reaction.message.guild:
await self.message.remove_reaction("✅", reaction.message.guild.me)
else:
await self.message.remove_reaction("✅", reaction.message.channel.me)
| true | true |
f73e70eb4950eb2382a3464b95c89405bea3c4e2 | 2,197 | py | Python | model/single_doc/longformer_model.py | NickSchoelkopf/SummerTime | 9a89aab8e1544e3c52c043b9c47ab325e665e11e | [
"Apache-2.0"
] | null | null | null | model/single_doc/longformer_model.py | NickSchoelkopf/SummerTime | 9a89aab8e1544e3c52c043b9c47ab325e665e11e | [
"Apache-2.0"
] | null | null | null | model/single_doc/longformer_model.py | NickSchoelkopf/SummerTime | 9a89aab8e1544e3c52c043b9c47ab325e665e11e | [
"Apache-2.0"
] | null | null | null | from transformers import LongformerTokenizer, EncoderDecoderModel
from .base_single_doc_model import SingleDocSummModel
class LongformerModel(SingleDocSummModel):
# static variables
model_name = "Longformer"
is_extractive = False
is_neural = True
def __init__(self):
super(LongformerModel, self).__init__(
trained_domain="News", max_input_length=4096, max_output_length=None
)
self.model = EncoderDecoderModel.from_pretrained(
"patrickvonplaten/longformer2roberta-cnn_dailymail-fp16"
)
self.tokenizer = LongformerTokenizer.from_pretrained(
"allenai/longformer-base-4096"
)
def summarize(self, corpus, queries=None):
self.assert_summ_input_type(corpus, queries)
summaries = list(map(lambda doc: self.summarize_single(doc), corpus))
return summaries
def summarize_single(self, document):
# Tokenizes document and returns PyTorch torch.Tensor object with length attribute
tokenized_sequence = self.tokenizer(
document,
return_tensors="pt",
return_length=True,
truncation=True,
max_length=4096,
)
print(
f"Longformer model: processing document of {tokenized_sequence.length} tokens"
)
input_ids = tokenized_sequence.input_ids
# output_ids is tensor with one layer: output_ids[0] extracts tensor layer for decoding
output_ids = self.model.generate(input_ids)
return self.tokenizer.decode(output_ids[0], skip_special_tokens=True)
@classmethod
def show_capability(cls) -> None:
basic_description = cls.generate_basic_description()
more_details = (
"A Longformer2Roberta model finetuned on CNN-DM dataset for summarization.\n\n"
"Strengths:\n - Correctly handles longer (> 2000 tokens) corpus.\n\n"
"Weaknesses:\n - Less accurate on contexts outside training domain.\n\n"
"Initialization arguments:\n "
" - `corpus`: Unlabelled corpus of documents.\n"
)
print(f"{basic_description} \n {'#'*20} \n {more_details}")
| 36.616667 | 95 | 0.665453 | from transformers import LongformerTokenizer, EncoderDecoderModel
from .base_single_doc_model import SingleDocSummModel
class LongformerModel(SingleDocSummModel):
model_name = "Longformer"
is_extractive = False
is_neural = True
def __init__(self):
super(LongformerModel, self).__init__(
trained_domain="News", max_input_length=4096, max_output_length=None
)
self.model = EncoderDecoderModel.from_pretrained(
"patrickvonplaten/longformer2roberta-cnn_dailymail-fp16"
)
self.tokenizer = LongformerTokenizer.from_pretrained(
"allenai/longformer-base-4096"
)
def summarize(self, corpus, queries=None):
self.assert_summ_input_type(corpus, queries)
summaries = list(map(lambda doc: self.summarize_single(doc), corpus))
return summaries
def summarize_single(self, document):
tokenized_sequence = self.tokenizer(
document,
return_tensors="pt",
return_length=True,
truncation=True,
max_length=4096,
)
print(
f"Longformer model: processing document of {tokenized_sequence.length} tokens"
)
input_ids = tokenized_sequence.input_ids
output_ids = self.model.generate(input_ids)
return self.tokenizer.decode(output_ids[0], skip_special_tokens=True)
@classmethod
def show_capability(cls) -> None:
basic_description = cls.generate_basic_description()
more_details = (
"A Longformer2Roberta model finetuned on CNN-DM dataset for summarization.\n\n"
"Strengths:\n - Correctly handles longer (> 2000 tokens) corpus.\n\n"
"Weaknesses:\n - Less accurate on contexts outside training domain.\n\n"
"Initialization arguments:\n "
" - `corpus`: Unlabelled corpus of documents.\n"
)
print(f"{basic_description} \n {'#'*20} \n {more_details}")
| true | true |
f73e70ef65fd2d764ecf1cc60292802342d9661f | 5,821 | py | Python | NeuralATT/train.py | INK-USC/shifted-label-distribution | 3cf2b7ced3b2e18234db405f6014f049c4830d71 | [
"Apache-2.0"
] | 37 | 2019-10-29T13:12:41.000Z | 2022-01-20T02:42:28.000Z | NeuralATT/train.py | INK-USC/shifted-label-distribution | 3cf2b7ced3b2e18234db405f6014f049c4830d71 | [
"Apache-2.0"
] | 5 | 2020-07-23T10:32:59.000Z | 2021-09-01T11:37:15.000Z | NeuralATT/train.py | INK-USC/shifted-label-distribution | 3cf2b7ced3b2e18234db405f6014f049c4830d71 | [
"Apache-2.0"
] | 2 | 2020-05-27T06:00:56.000Z | 2021-02-08T10:45:41.000Z | '''
Training script with ramdom splitting dev set
'''
__author__ = 'Maosen'
import torch
from model import Model, Wrapper
import utils
from utils import Dataset
import argparse
import pickle
import numpy as np
from tqdm import tqdm
import logging
import os
import random
torch.backends.cudnn.deterministic = True
def train(args):
    """Train a relation-extraction model, tracking the best epoch on dev F1.

    Relies on module-level state set up in ``__main__``: ``device``,
    ``rel2id``, ``emb_matrix``, ``niter``, ``runid`` and the three datasets
    (``train_dset``, ``dev_dset``, ``test_dset``).

    Returns a tuple of (best dev F1, (test P, test R, test F1) at that epoch).
    """
    logging.info(str(args))
    model = Model(args, device, rel2id, emb_matrix)
    wrapper = Wrapper(model, args, device, train_dset.rel2id)

    best_dev_f1 = 0.0
    best_test_result = (0.0, 0.0, 0.0)
    for epoch in range(niter):
        # One pass over the (bagged) training data, accumulating mean loss.
        epoch_loss = 0.0
        for batch_idx, batch in enumerate(tqdm(train_dset.batched_data)):
            scope = train_dset.batched_scope[batch_idx]
            epoch_loss += wrapper.update(batch, scope)
        epoch_loss /= len(train_dset.batched_data)

        valid_loss, (dev_prec, dev_recall, dev_f1), _, _, _ = wrapper.eval(dev_dset)
        logging.info('Iteration %d, Train loss %f' % (epoch, epoch_loss))
        logging.info(
            'Dev loss: {:.4f}, P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(
                valid_loss, dev_prec, dev_recall, dev_f1))
        test_loss, (test_prec, test_recall, test_f1), _, _, _ = wrapper.eval(test_dset)
        logging.info(
            'Test loss: {:.4f}, P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(
                test_loss, test_prec, test_recall, test_f1))

        if dev_f1 > best_dev_f1:
            # New best epoch on dev: remember the test metrics and save a
            # checkpoint named after the experiment info and run id.
            best_dev_f1 = dev_f1
            best_test_result = (test_prec, test_recall, test_f1)
            save_filename = os.path.join(args.save_dir, '%s_%d.pkl' % (args.info, runid))
            wrapper.save(save_filename, epoch)
        # Learning-rate schedule keyed off the validation loss.
        wrapper.update_lr(valid_loss)

    logging.info('Max dev F1: %f' % best_dev_f1)
    test_p, test_r, test_f1 = best_test_result
    logging.info('Test P, R, F1 on best epoch: {:.4f}, {:.4f}, {:.4f}'.format(test_p, test_r, test_f1))
    logging.info('\n')
    return best_dev_f1, best_test_result
if __name__ == '__main__':
	parser = argparse.ArgumentParser()
	# Dataset / vocabulary locations
	parser.add_argument('--data_dir', type=str, default='data/neural_att/KBP')
	parser.add_argument('--vocab_dir', type=str, default='data/neural/vocab')
	# Model architecture
	parser.add_argument('--encoder', type=str, default='pcnn', help='Model')
	parser.add_argument('--emb_dim', type=int, default=300, help='Word embedding dimension.')
	parser.add_argument('--ner_dim', type=int, default=30, help='NER embedding dimension.')
	parser.add_argument('--pos_dim', type=int, default=30, help='POS embedding dimension.')
	parser.add_argument('--attn_dim', type=int, default=200, help='Attention size.')
	parser.add_argument('--position_dim', type=int, default=30, help='Position encoding dimension.')
	parser.add_argument('--hidden', type=int, default=230, help='RNN hidden state size.')
	parser.add_argument('--window_size', type=int, default=3, help='Convolution window size')
	parser.add_argument('--num_layers', type=int, default=2, help='Num of RNN layers.')
	parser.add_argument('--bidirectional', dest='bidirectional', action='store_true', help='Bidirectional RNN.')
	parser.set_defaults(bidirectional=True)
	# Data Loading & Pre-processing
	parser.add_argument('--lower', dest='lower', action='store_true', help='Lowercase all words.')
	parser.add_argument('--no-lower', dest='lower', action='store_false')
	parser.set_defaults(lower=True)
	parser.add_argument('--batch_size', type=int, default=64)
	# Optimization
	parser.add_argument('--lr', type=float, default=1.0, help='Applies to SGD and Adagrad.')
	parser.add_argument('--lr_decay', type=float, default=0.9)
	parser.add_argument('--num_epoch', type=int, default=30)
	parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')
	# Optimization - Dropout
	parser.add_argument('--dropout', type=float, default=0.5, help='Input and RNN dropout rate.')
	parser.add_argument('--in_drop', type=float, default=0.5, help='Input dropout rate.')
	parser.add_argument('--intra_drop', type=float, default=0.3, help='Intra-layer dropout rate.')
	parser.add_argument('--out_drop', type=float, default=0.7, help='Output dropout rate.')
	# Other options
	parser.add_argument('--seed', type=int, default=7698)
	parser.add_argument('--repeat', type=int, default=5)
	parser.add_argument('--save_dir', type=str, default='./dumped_models', help='Root dir for saving models.')
	parser.add_argument('--info', type=str, default='KBP_default_ATT', help='Optional info for the experiment.')
	args = parser.parse_args()
	# Seed every RNG for reproducibility (cudnn.deterministic is set above).
	random.seed(args.seed)
	np.random.seed(args.seed)
	torch.manual_seed(args.seed)
	logger = logging.getLogger()
	logger.setLevel(logging.INFO)
	# Load vocab file (id2word) and invert it to a word -> id mapping.
	with open(args.vocab_dir + '/vocab.pkl', 'rb') as f:
		vocab = pickle.load(f)
	word2id = {}
	for idx, word in enumerate(vocab):
		word2id[word] = idx
	# Load word embedding; rows must line up with the vocabulary.
	emb_file = args.vocab_dir + '/embedding.npy'
	emb_matrix = np.load(emb_file)
	assert emb_matrix.shape[0] == len(vocab)
	assert emb_matrix.shape[1] == args.emb_dim
	args.vocab_size = len(vocab)
	niter = args.num_epoch
	device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
	print('Using device: %s' % device.type)
	print('Reading data......')
	# Relation label mapping plus the three dataset splits; only training
	# uses bag-level (multi-instance) grouping.
	rel2id = utils.load_rel2id('%s/relation2id.json' % args.data_dir)
	train_filename = '%s/train.json' % args.data_dir
	test_filename = '%s/test.json' % args.data_dir
	dev_filename = '%s/dev.json' % args.data_dir
	train_dset = Dataset(train_filename, args, word2id, device, rel2id=rel2id, shuffle=True, use_bag=True)
	test_dset = Dataset(test_filename, args, word2id, device, rel2id=rel2id, use_bag=False)
	dev_dset = Dataset(dev_filename, args, word2id, device, rel2id=rel2id, use_bag=False)
	if not os.path.isdir(args.save_dir):
		os.makedirs(args.save_dir)
	# Repeat training with the same settings; train() reads runid for the
	# checkpoint filename.
	for runid in range(1, args.repeat + 1):
		logging.info('Run model %d times......' % runid)
		dev_f1, test_result = train(args)
| 38.296053 | 109 | 0.716715 | __author__ = 'Maosen'
import torch
from model import Model, Wrapper
import utils
from utils import Dataset
import argparse
import pickle
import numpy as np
from tqdm import tqdm
import logging
import os
import random
torch.backends.cudnn.deterministic = True
def train(args):
logging.info(str(args))
model = Model(args, device, rel2id, emb_matrix)
wrapper = Wrapper(model, args, device, train_dset.rel2id)
max_dev_f1 = 0.0
test_result_on_max_dev_f1 = (0.0, 0.0, 0.0)
for iter in range(niter):
loss = 0.0
for idx, batch in enumerate(tqdm(train_dset.batched_data)):
scope = train_dset.batched_scope[idx]
loss_batch = wrapper.update(batch, scope)
loss += loss_batch
loss /= len(train_dset.batched_data)
valid_loss, (dev_prec, dev_recall, dev_f1), _, _, _ = wrapper.eval(dev_dset)
logging.info('Iteration %d, Train loss %f' % (iter, loss))
logging.info(
'Dev loss: {:.4f}, P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(valid_loss, dev_prec, dev_recall,
dev_f1))
test_loss, (test_prec, test_recall, test_f1), _, _, _ = wrapper.eval(test_dset)
logging.info(
'Test loss: {:.4f}, P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(test_loss, test_prec, test_recall,
test_f1))
if dev_f1 > max_dev_f1:
max_dev_f1 = dev_f1
test_result_on_max_dev_f1 = (test_prec, test_recall, test_f1)
save_filename = os.path.join(args.save_dir, '%s_%d.pkl' % (args.info, runid))
wrapper.save(save_filename, iter)
wrapper.update_lr(valid_loss)
logging.info('Max dev F1: %f' % max_dev_f1)
test_p, test_r, test_f1 = test_result_on_max_dev_f1
logging.info('Test P, R, F1 on best epoch: {:.4f}, {:.4f}, {:.4f}'.format(test_p, test_r, test_f1))
logging.info('\n')
return max_dev_f1, test_result_on_max_dev_f1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='data/neural_att/KBP')
parser.add_argument('--vocab_dir', type=str, default='data/neural/vocab')
parser.add_argument('--encoder', type=str, default='pcnn', help='Model')
parser.add_argument('--emb_dim', type=int, default=300, help='Word embedding dimension.')
parser.add_argument('--ner_dim', type=int, default=30, help='NER embedding dimension.')
parser.add_argument('--pos_dim', type=int, default=30, help='POS embedding dimension.')
parser.add_argument('--attn_dim', type=int, default=200, help='Attention size.')
parser.add_argument('--position_dim', type=int, default=30, help='Position encoding dimension.')
parser.add_argument('--hidden', type=int, default=230, help='RNN hidden state size.')
parser.add_argument('--window_size', type=int, default=3, help='Convolution window size')
parser.add_argument('--num_layers', type=int, default=2, help='Num of RNN layers.')
parser.add_argument('--bidirectional', dest='bidirectional', action='store_true', help='Bidirectional RNN.')
parser.set_defaults(bidirectional=True)
parser.add_argument('--lower', dest='lower', action='store_true', help='Lowercase all words.')
parser.add_argument('--no-lower', dest='lower', action='store_false')
parser.set_defaults(lower=True)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--lr', type=float, default=1.0, help='Applies to SGD and Adagrad.')
parser.add_argument('--lr_decay', type=float, default=0.9)
parser.add_argument('--num_epoch', type=int, default=30)
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')
parser.add_argument('--dropout', type=float, default=0.5, help='Input and RNN dropout rate.')
parser.add_argument('--in_drop', type=float, default=0.5, help='Input dropout rate.')
parser.add_argument('--intra_drop', type=float, default=0.3, help='Intra-layer dropout rate.')
parser.add_argument('--out_drop', type=float, default=0.7, help='Output dropout rate.')
parser.add_argument('--seed', type=int, default=7698)
parser.add_argument('--repeat', type=int, default=5)
parser.add_argument('--save_dir', type=str, default='./dumped_models', help='Root dir for saving models.')
parser.add_argument('--info', type=str, default='KBP_default_ATT', help='Optional info for the experiment.')
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
with open(args.vocab_dir + '/vocab.pkl', 'rb') as f:
vocab = pickle.load(f)
word2id = {}
for idx, word in enumerate(vocab):
word2id[word] = idx
emb_file = args.vocab_dir + '/embedding.npy'
emb_matrix = np.load(emb_file)
assert emb_matrix.shape[0] == len(vocab)
assert emb_matrix.shape[1] == args.emb_dim
args.vocab_size = len(vocab)
niter = args.num_epoch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Using device: %s' % device.type)
print('Reading data......')
rel2id = utils.load_rel2id('%s/relation2id.json' % args.data_dir)
train_filename = '%s/train.json' % args.data_dir
test_filename = '%s/test.json' % args.data_dir
dev_filename = '%s/dev.json' % args.data_dir
train_dset = Dataset(train_filename, args, word2id, device, rel2id=rel2id, shuffle=True, use_bag=True)
test_dset = Dataset(test_filename, args, word2id, device, rel2id=rel2id, use_bag=False)
dev_dset = Dataset(dev_filename, args, word2id, device, rel2id=rel2id, use_bag=False)
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
for runid in range(1, args.repeat + 1):
logging.info('Run model %d times......' % runid)
dev_f1, test_result = train(args)
logging.info('')
| true | true |
f73e7218a3eaf4998f4b32f6977df96bbf9d95ba | 70,207 | py | Python | ThirdParty/Twisted/twisted/internet/test/test_tcp.py | OpenGeoscience/VTK | a373e975b9284a022f43a062ebf5042bb17b4e44 | [
"BSD-3-Clause"
] | 1 | 2021-10-13T01:57:14.000Z | 2021-10-13T01:57:14.000Z | ThirdParty/Twisted/twisted/internet/test/test_tcp.py | OpenGeoscience/VTK | a373e975b9284a022f43a062ebf5042bb17b4e44 | [
"BSD-3-Clause"
] | null | null | null | ThirdParty/Twisted/twisted/internet/test/test_tcp.py | OpenGeoscience/VTK | a373e975b9284a022f43a062ebf5042bb17b4e44 | [
"BSD-3-Clause"
] | 5 | 2015-10-09T04:12:29.000Z | 2021-12-15T16:57:11.000Z | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorTCP} and the TCP parts of
L{IReactorSocket}.
"""
from __future__ import division, absolute_import
__metaclass__ = type
import socket, errno
from zope.interface import implementer
from twisted.python.compat import _PY3
from twisted.python.runtime import platform
from twisted.python.failure import Failure
from twisted.python import log
from twisted.trial.unittest import SkipTest, TestCase
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.internet.error import (
ConnectionLost, UserError, ConnectionRefusedError, ConnectionDone,
ConnectionAborted)
from twisted.internet.interfaces import (
ILoggingContext, IConnector, IReactorFDSet, IReactorSocket, IReactorTCP)
from twisted.internet.address import IPv4Address, IPv6Address
from twisted.internet.defer import (
Deferred, DeferredList, maybeDeferred, gatherResults)
from twisted.internet._endpointspy3 import (
TCP4ServerEndpoint, TCP4ClientEndpoint)
from twisted.internet.protocol import ServerFactory, ClientFactory, Protocol
from twisted.internet.interfaces import (
IPushProducer, IPullProducer, IHalfCloseableProtocol)
from twisted.internet.tcp import Connection, Server, _resolveIPv6
from twisted.internet.test.connectionmixins import (
LogObserverMixin, ConnectionTestsMixin, TCPClientTestsMixin, findFreePort,
ConnectableProtocol, EndpointCreator, runProtocolsWithReactor)
from twisted.internet.test.test_core import ObjectModelIntegrationMixin
from twisted.test.test_tcp import MyClientFactory, MyServerFactory
from twisted.test.test_tcp import ClosingFactory, ClientStartStopFactory
try:
from OpenSSL import SSL
except ImportError:
useSSL = False
else:
from twisted.internet.ssl import ClientContextFactory
useSSL = True
try:
socket.socket(socket.AF_INET6, socket.SOCK_STREAM).close()
except socket.error as e:
ipv6Skip = str(e)
else:
ipv6Skip = None
if platform.isWindows():
from twisted.internet.test import _win32ifaces
getLinkLocalIPv6Addresses = _win32ifaces.win32GetLinkLocalIPv6Addresses
else:
try:
from twisted.internet.test import _posixifaces
except ImportError:
getLinkLocalIPv6Addresses = lambda: []
else:
getLinkLocalIPv6Addresses = _posixifaces.posixGetLinkLocalIPv6Addresses
def getLinkLocalIPv6Address():
    """
    Find and return a configured link local IPv6 address including a scope
    identifier using the % separation syntax.  If the system has no link
    local IPv6 addresses, raise L{SkipTest} instead.

    @raise SkipTest: if no link local address can be found or if the
        C{netifaces} module is not available.

    @return: a C{str} giving the address
    """
    # Return the first configured address, if any exists.
    for address in getLinkLocalIPv6Addresses():
        return address
    raise SkipTest("Link local IPv6 address unavailable")
def connect(client, destination):
    """
    Connect a socket to the given destination.

    @param client: A C{socket.socket}.

    @param destination: A tuple of (host, port). The host is a C{str}, the
        port a C{int}. If the C{host} is an IPv6 IP, the address is resolved
        using C{getaddrinfo} and the first version found is used.
    """
    host, port = destination
    looksLikeIPv6 = ':' in host or '%' in host
    if looksLikeIPv6:
        # getaddrinfo supplies the full sockaddr tuple (including flow info
        # and scope id) needed to connect to an IPv6 address.
        target = socket.getaddrinfo(host, port)[0][4]
    else:
        target = (host, port)
    client.connect(target)
class FakeSocket(object):
    """
    A fake for L{socket.socket} objects.

    @ivar data: A C{str} giving the data which will be returned from
        L{FakeSocket.recv}.

    @ivar sendBuffer: A C{list} of the objects passed to L{FakeSocket.send}.
    """
    def __init__(self, data):
        self.data = data
        self.sendBuffer = []

    def setblocking(self, blocking):
        # Only remember the requested mode; nothing here ever blocks.
        self.blocking = blocking

    def recv(self, size):
        # The requested size is ignored; all of the canned data is returned.
        return self.data

    def send(self, bytes):
        """
        I{Send} all of C{bytes} by accumulating it into C{self.sendBuffer}.

        @return: The length of C{bytes}, indicating all the data has been
            accepted.
        """
        self.sendBuffer.append(bytes)
        return len(bytes)

    def shutdown(self, how):
        """
        Shutdown is not implemented.  The method is provided since real
        sockets have it and some code expects it.  No behavior of
        L{FakeSocket} is affected by a call to it.
        """

    def close(self):
        """
        Close is not implemented.  The method is provided since real sockets
        have it and some code expects it.  No behavior of L{FakeSocket} is
        affected by a call to it.
        """

    def setsockopt(self, *args):
        """
        Setsockopt is not implemented.  The method is provided since real
        sockets have it and some code expects it.  No behavior of
        L{FakeSocket} is affected by a call to it.
        """

    def fileno(self):
        """
        Return a fake file descriptor.  If actually used, this will have no
        connection to this L{FakeSocket} and will probably cause surprising
        results.
        """
        return 1
class TestFakeSocket(TestCase):
    """
    Test that the FakeSocket can be used by the doRead method of L{Connection}
    """
    def test_blocking(self):
        # setblocking merely records the mode on the fake socket.
        skt = FakeSocket(b"someData")
        skt.setblocking(0)
        self.assertEqual(skt.blocking, 0)
    def test_recv(self):
        # recv ignores its size argument and returns all of the canned data.
        skt = FakeSocket(b"someData")
        self.assertEqual(skt.recv(10), b"someData")
    def test_send(self):
        """
        L{FakeSocket.send} accepts the entire string passed to it, adds it to
        its send buffer, and returns its length.
        """
        skt = FakeSocket(b"")
        count = skt.send(b"foo")
        self.assertEqual(count, 3)
        self.assertEqual(skt.sendBuffer, [b"foo"])
class FakeProtocol(Protocol):
    """
    An L{IProtocol} that returns a value from its dataReceived method.
    """
    def dataReceived(self, data):
        """
        Return something other than C{None} to trigger a deprecation warning for
        that behavior.
        """
        # Any non-None value suffices; an empty tuple is the cheapest.
        return ()
@implementer(IReactorFDSet)
class _FakeFDSetReactor(object):
    """
    A no-op implementation of L{IReactorFDSet}, which ignores all adds and
    removes.
    """
    # One shared no-op callable stands in for all four IReactorFDSet methods.
    addReader = addWriter = removeReader = removeWriter = (
        lambda self, desc: None)
class TCPServerTests(TestCase):
    """
    Whitebox tests for L{twisted.internet.tcp.Server}.
    """
    def setUp(self):
        # Build a Server transport around a FakeSocket and a no-op reactor so
        # write behavior can be inspected without any real I/O.
        self.reactor = _FakeFDSetReactor()
        class FakePort(object):
            _realPortNumber = 3
        self.skt = FakeSocket(b"")
        self.protocol = Protocol()
        self.server = Server(
            self.skt, self.protocol, ("", 0), FakePort(), None, self.reactor)
    def test_writeAfterDisconnect(self):
        """
        L{Server.write} discards bytes passed to it if called after it has lost
        its connection.
        """
        self.server.connectionLost(
            Failure(Exception("Simulated lost connection")))
        self.server.write(b"hello world")
        self.assertEqual(self.skt.sendBuffer, [])
    def test_writeAfteDisconnectAfterTLS(self):
        """
        L{Server.write} discards bytes passed to it if called after it has lost
        its connection when the connection had started TLS.
        """
        # The TLS flag routes writes through the TLS-aware code path.
        self.server.TLS = True
        self.test_writeAfterDisconnect()
    def test_writeSequenceAfterDisconnect(self):
        """
        L{Server.writeSequence} discards bytes passed to it if called after it
        has lost its connection.
        """
        self.server.connectionLost(
            Failure(Exception("Simulated lost connection")))
        self.server.writeSequence([b"hello world"])
        self.assertEqual(self.skt.sendBuffer, [])
    def test_writeSequenceAfteDisconnectAfterTLS(self):
        """
        L{Server.writeSequence} discards bytes passed to it if called after it
        has lost its connection when the connection had started TLS.
        """
        # Same as above, but exercising the TLS-aware write path.
        self.server.TLS = True
        self.test_writeSequenceAfterDisconnect()
class TCPConnectionTests(TestCase):
    """
    Whitebox tests for L{twisted.internet.tcp.Connection}.
    """
    def test_doReadWarningIsRaised(self):
        """
        A deprecation warning is emitted when an L{IProtocol} implementation
        returns a value other than C{None} from its C{dataReceived} method.
        """
        conn = Connection(FakeSocket(b"someData"), FakeProtocol())
        conn.doRead()
        warnings = self.flushWarnings([FakeProtocol.dataReceived])
        self.assertEqual(len(warnings), 1)
        self.assertEqual(warnings[0]['category'], DeprecationWarning)
        self.assertEqual(
            warnings[0]["message"],
            "Returning a value other than None from "
            "twisted.internet.test.test_tcp.FakeProtocol.dataReceived "
            "is deprecated since Twisted 11.0.0.")
    def test_noTLSBeforeStartTLS(self):
        """
        A freshly created L{Connection} has a C{TLS} attribute of C{False}.
        """
        conn = Connection(FakeSocket(b""), FakeProtocol())
        self.assertFalse(conn.TLS)
    def test_tlsAfterStartTLS(self):
        """
        After L{Connection.startTLS} has been called, the C{TLS} attribute of
        the L{Connection} is C{True}.
        """
        conn = Connection(
            FakeSocket(b""), FakeProtocol(), reactor=_FakeFDSetReactor())
        conn._tlsClientDefault = True
        conn.startTLS(ClientContextFactory(), True)
        self.assertTrue(conn.TLS)
    if not useSSL:
        test_tlsAfterStartTLS.skip = "No SSL support available"
class TCPCreator(EndpointCreator):
    """
    Creator of IPv4 TCP endpoints for use with
    L{runProtocolsWithReactor}-based tests.
    """
    interface = "127.0.0.1"
    def server(self, reactor):
        """
        Return a TCP endpoint listening on an arbitrary free port.
        """
        return TCP4ServerEndpoint(reactor, 0, interface=self.interface)
    def client(self, reactor, serverAddress):
        """
        Return a TCP endpoint which will connect to C{serverAddress}.

        @type serverAddress: L{IPv4Address}
        """
        return TCP4ClientEndpoint(reactor, self.interface, serverAddress.port)
class TCP6Creator(TCPCreator):
    """
    IPv6 variant of L{TCPCreator} for
    C{ReactorBuilder.runProtocolsWithReactor}-based tests.

    The endpoint classes involved are still the TCP4 variety: they simply pass
    IPv6 address literals straight through to the reactor, and only address
    literals (not name resolution) are exercised here, as name resolution has
    not yet been implemented.  See http://twistedmatrix.com/trac/ticket/4470
    for more specific information about new endpoint classes.  The "4" in the
    name is therefore slightly misleading, but if you pass an IPv6 literal,
    you get IPv6 behavior.
    """
    def __init__(self):
        self.interface = getLinkLocalIPv6Address()
class TCPClientTestsBase(ReactorBuilder, ConnectionTestsMixin,
                         TCPClientTestsMixin):
    """
    Base class for builders defining tests related to L{IReactorTCP.connectTCP}.
    """
    requiredInterfaces = (IReactorTCP,)
    port = 1234
    @property
    def interface(self):
        """
        The interface attribute of this builder's endpoint creator.
        """
        return self.endpoints.interface
class TCP4ClientTestsBuilder(TCPClientTestsBase):
    """
    Builder configured with IPv4 parameters for tests related to
    L{IReactorTCP.connectTCP}.
    """
    family = socket.AF_INET
    addressClass = IPv4Address
    endpoints = TCPCreator()
    # Used by the name-resolution tests; it is never actually resolved.
    fakeDomainName = 'some-fake.domain.example.com'
class TCP6ClientTestsBuilder(TCPClientTestsBase):
    """
    Builder configured with IPv6 parameters for tests related to
    L{IReactorTCP.connectTCP}.
    """
    if ipv6Skip:
        skip = ipv6Skip
    family = socket.AF_INET6
    addressClass = IPv6Address
    def setUp(self):
        # Instantiate lazily, in setUp, so nothing is created when the tests
        # are being skipped.
        self.endpoints = TCP6Creator()
        # test_addresses distinguishes between the resolved name and the name
        # on the socket itself.  All the same invariants should hold, but
        # handing an IPv6 address back from a resolver is not something the
        # reactor can cope with, so the connect call for the IPv6 test simply
        # uses an address literal instead.
        self.fakeDomainName = self.endpoints.interface
class TCPConnectorTestsBuilder(ReactorBuilder):
    """
    Tests for the L{IConnector} provider returned by L{IReactorTCP.connectTCP}.
    """
    requiredInterfaces = (IReactorTCP,)
    def test_connectorIdentity(self):
        """
        L{IReactorTCP.connectTCP} returns an object which provides
        L{IConnector}. The destination of the connector is the address which
        was passed to C{connectTCP}. The same connector object is passed to
        the factory's C{startedConnecting} method as to the factory's
        C{clientConnectionLost} method.
        """
        serverFactory = ClosingFactory()
        reactor = self.buildReactor()
        tcpPort = reactor.listenTCP(0, serverFactory, interface=self.interface)
        serverFactory.port = tcpPort
        portNumber = tcpPort.getHost().port
        seenConnectors = []
        seenFailures = []
        clientFactory = ClientStartStopFactory()
        # Record the connector (and the disconnect reason) handed to each
        # factory callback so their identity can be compared after the
        # reactor has finished running.
        clientFactory.clientConnectionLost = (
            lambda connector, reason: (seenConnectors.append(connector),
                                       seenFailures.append(reason)))
        clientFactory.startedConnecting = seenConnectors.append
        connector = reactor.connectTCP(self.interface, portNumber,
                                       clientFactory)
        self.assertTrue(IConnector.providedBy(connector))
        dest = connector.getDestination()
        self.assertEqual(dest.type, "TCP")
        self.assertEqual(dest.host, self.interface)
        self.assertEqual(dest.port, portNumber)
        clientFactory.whenStopped.addBoth(lambda _: reactor.stop())
        self.runReactor(reactor)
        seenFailures[0].trap(ConnectionDone)
        # Both startedConnecting and clientConnectionLost saw the very same
        # connector object that connectTCP returned.
        self.assertEqual(seenConnectors, [connector, connector])
    def test_userFail(self):
        """
        Calling L{IConnector.stopConnecting} in C{Factory.startedConnecting}
        results in C{Factory.clientConnectionFailed} being called with
        L{error.UserError} as the reason.
        """
        serverFactory = MyServerFactory()
        reactor = self.buildReactor()
        tcpPort = reactor.listenTCP(0, serverFactory, interface=self.interface)
        portNumber = tcpPort.getHost().port
        fatalErrors = []
        def startedConnecting(connector):
            try:
                connector.stopConnecting()
            except Exception:
                # Capture unexpected failures for reporting after the
                # reactor has stopped; re-raising here would be swallowed.
                fatalErrors.append(Failure())
                reactor.stop()
        clientFactory = ClientStartStopFactory()
        clientFactory.startedConnecting = startedConnecting
        clientFactory.whenStopped.addBoth(lambda _: reactor.stop())
        reactor.callWhenRunning(lambda: reactor.connectTCP(self.interface,
                                                           portNumber,
                                                           clientFactory))
        self.runReactor(reactor)
        if fatalErrors:
            self.fail(fatalErrors[0].getTraceback())
        clientFactory.reason.trap(UserError)
        self.assertEqual(clientFactory.failed, 1)
    def test_reconnect(self):
        """
        Calling L{IConnector.connect} in C{Factory.clientConnectionLost} causes
        a new connection attempt to be made.
        """
        serverFactory = ClosingFactory()
        reactor = self.buildReactor()
        tcpPort = reactor.listenTCP(0, serverFactory, interface=self.interface)
        serverFactory.port = tcpPort
        portNumber = tcpPort.getHost().port
        clientFactory = MyClientFactory()
        def clientConnectionLost(connector, reason):
            # Retry immediately; ClosingFactory will have shut the port down,
            # so this second attempt is expected to be refused.
            connector.connect()
        clientFactory.clientConnectionLost = clientConnectionLost
        reactor.connectTCP(self.interface, portNumber, clientFactory)
        protocolMadeAndClosed = []
        def reconnectFailed(ignored):
            p = clientFactory.protocol
            protocolMadeAndClosed.append((p.made, p.closed))
            reactor.stop()
        clientFactory.failDeferred.addCallback(reconnectFailed)
        self.runReactor(reactor)
        clientFactory.reason.trap(ConnectionRefusedError)
        self.assertEqual(protocolMadeAndClosed, [(1, 1)])
class TCP4ConnectorTestsBuilder(TCPConnectorTestsBuilder):
    """
    IPv4 configuration of the L{IConnector} tests defined by
    L{TCPConnectorTestsBuilder}.
    """
    interface = '127.0.0.1'
    family = socket.AF_INET
    addressClass = IPv4Address
class TCP6ConnectorTestsBuilder(TCPConnectorTestsBuilder):
    """
    IPv6 configuration of the L{IConnector} tests defined by
    L{TCPConnectorTestsBuilder}.
    """
    family = socket.AF_INET6
    addressClass = IPv6Address
    if ipv6Skip:
        skip = ipv6Skip
    def setUp(self):
        # Resolved in setUp rather than at class level so it is not attempted
        # when the tests are being skipped.
        self.interface = getLinkLocalIPv6Address()
def createTestSocket(test, addressFamily, socketType):
    """
    Create a socket whose lifetime is tied to the given test.

    @param test: the test to add cleanup to.
    @param addressFamily: an C{AF_*} constant
    @param socketType: a C{SOCK_*} constant.
    @return: a socket object.
    """
    sock = socket.socket(addressFamily, socketType)
    test.addCleanup(sock.close)
    return sock
class StreamTransportTestsMixin(LogObserverMixin):
    """
    Mixin defining tests which apply to any port/connection based transport.
    """
    def test_startedListeningLogMessage(self):
        """
        When a port starts, a message including a description of the associated
        factory is logged.
        """
        loggedMessages = self.observe()
        reactor = self.buildReactor()
        @implementer(ILoggingContext)
        class SomeFactory(ServerFactory):
            def logPrefix(self):
                return "Crazy Factory"
        factory = SomeFactory()
        p = self.getListeningPort(reactor, factory)
        expectedMessage = self.getExpectedStartListeningLogMessage(
            p, "Crazy Factory")
        self.assertEqual((expectedMessage,), loggedMessages[0]['message'])
    def test_connectionLostLogMsg(self):
        """
        When a connection is lost, an informative message should be logged
        (see L{getExpectedConnectionLostLogMsg}): an address identifying
        the port and the fact that it was closed.
        """
        loggedMessages = []
        def logConnectionLostMsg(eventDict):
            loggedMessages.append(log.textFromEventDict(eventDict))
        reactor = self.buildReactor()
        p = self.getListeningPort(reactor, ServerFactory())
        expectedMessage = self.getExpectedConnectionLostLogMsg(p)
        log.addObserver(logConnectionLostMsg)
        def stopReactor(ignored):
            log.removeObserver(logConnectionLostMsg)
            reactor.stop()
        def doStopListening():
            # NOTE(review): the observer was already added above; adding it a
            # second time means it may fire twice per event — confirm this
            # duplication is intended.
            log.addObserver(logConnectionLostMsg)
            maybeDeferred(p.stopListening).addCallback(stopReactor)
        reactor.callWhenRunning(doStopListening)
        reactor.run()
        self.assertIn(expectedMessage, loggedMessages)
    def test_allNewStyle(self):
        """
        The L{IListeningPort} object is an instance of a class with no
        classic classes in its hierarchy.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, ServerFactory())
        self.assertFullyNewStyle(port)
class ListenTCPMixin(object):
    """
    Mixin whose listening TCP ports come straight from
    L{IReactorTCP.listenTCP}.
    """
    def getListeningPort(self, reactor, factory, port=0, interface=''):
        """
        Ask C{reactor} for a TCP port bound to C{interface}.
        """
        return reactor.listenTCP(port, factory, interface=interface)
class SocketTCPMixin(object):
    """
    Mixin which uses L{IReactorSocket.adoptStreamPort} to hand out listening TCP
    ports.
    """
    def getListeningPort(self, reactor, factory, port=0, interface=''):
        """
        Create and bind a listening socket ourselves, then have the reactor
        adopt its already-initialized file descriptor.
        """
        if not IReactorSocket.providedBy(reactor):
            raise SkipTest("Reactor does not provide IReactorSocket")
        # Choose the address family from the shape of the interface string.
        if ':' in interface:
            domain = socket.AF_INET6
            address = socket.getaddrinfo(interface, port)[0][4]
        else:
            domain = socket.AF_INET
            address = (interface, port)
        portSock = socket.socket(domain)
        portSock.bind(address)
        portSock.listen(3)
        portSock.setblocking(False)
        try:
            return reactor.adoptStreamPort(
                portSock.fileno(), portSock.family, factory)
        finally:
            # The socket should still be open; fileno will raise if it is
            # not.
            portSock.fileno()
            # Now clean it up, because the rest of the test does not need
            # it.
            portSock.close()
class TCPPortTestsMixin(object):
    """
    Tests for L{IReactorTCP.listenTCP}
    """
    requiredInterfaces = (IReactorTCP,)
    def getExpectedStartListeningLogMessage(self, port, factory):
        """
        Get the message expected to be logged when a TCP port starts listening.
        """
        return "%s starting on %d" % (
            factory, port.getHost().port)
    def getExpectedConnectionLostLogMsg(self, port):
        """
        Get the expected connection lost message for a TCP port.
        """
        return "(TCP Port %s Closed)" % (port.getHost().port,)
    def test_portGetHostOnIPv4(self):
        """
        When no interface is passed to L{IReactorTCP.listenTCP}, the returned
        listening port listens on an IPv4 address.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, ServerFactory())
        address = port.getHost()
        self.assertIsInstance(address, IPv4Address)
    def test_portGetHostOnIPv6(self):
        """
        When listening on an IPv6 address, L{IListeningPort.getHost} returns
        an L{IPv6Address} with C{host} and C{port} attributes reflecting the
        address the port is bound to.
        """
        reactor = self.buildReactor()
        host, portNumber = findFreePort(
            family=socket.AF_INET6, interface='::1')[:2]
        port = self.getListeningPort(
            reactor, ServerFactory(), portNumber, host)
        address = port.getHost()
        self.assertIsInstance(address, IPv6Address)
        self.assertEqual('::1', address.host)
        self.assertEqual(portNumber, address.port)
    if ipv6Skip:
        test_portGetHostOnIPv6.skip = ipv6Skip
    def test_portGetHostOnIPv6ScopeID(self):
        """
        When a link-local IPv6 address including a scope identifier is passed as
        the C{interface} argument to L{IReactorTCP.listenTCP}, the resulting
        L{IListeningPort} reports its address as an L{IPv6Address} with a host
        value that includes the scope identifier.
        """
        linkLocal = getLinkLocalIPv6Address()
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, ServerFactory(), 0, linkLocal)
        address = port.getHost()
        self.assertIsInstance(address, IPv6Address)
        self.assertEqual(linkLocal, address.host)
    if ipv6Skip:
        test_portGetHostOnIPv6ScopeID.skip = ipv6Skip
    def _buildProtocolAddressTest(self, client, interface):
        """
        Connect C{client} to a server listening on C{interface} started with
        L{IReactorTCP.listenTCP} and return the address passed to the factory's
        C{buildProtocol} method.
        @param client: A C{SOCK_STREAM} L{socket.socket} created with an address
            family such that it will be able to connect to a server listening on
            C{interface}.
        @param interface: A C{str} giving an address for a server to listen on.
            This should almost certainly be the loopback address for some
            address family supported by L{IReactorTCP.listenTCP}.
        @return: Whatever object, probably an L{IAddress} provider, is passed to
            a server factory's C{buildProtocol} method when C{client}
            establishes a connection.
        """
        class ObserveAddress(ServerFactory):
            def buildProtocol(self, address):
                reactor.stop()
                self.observedAddress = address
                return Protocol()
        factory = ObserveAddress()
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, factory, 0, interface)
        client.setblocking(False)
        try:
            connect(client, (port.getHost().host, port.getHost().port))
        except socket.error as e:
            # A non-blocking connect is expected to report "in progress";
            # anything else is a real failure.
            errnum, message = e.args
            self.assertIn(errnum, (errno.EINPROGRESS, errno.EWOULDBLOCK))
        self.runReactor(reactor)
        return factory.observedAddress
    def test_buildProtocolIPv4Address(self):
        """
        When a connection is accepted over IPv4, an L{IPv4Address} is passed
        to the factory's C{buildProtocol} method giving the peer's address.
        """
        interface = '127.0.0.1'
        client = createTestSocket(self, socket.AF_INET, socket.SOCK_STREAM)
        observedAddress = self._buildProtocolAddressTest(client, interface)
        self.assertEqual(
            IPv4Address('TCP', *client.getsockname()), observedAddress)
    def test_buildProtocolIPv6Address(self):
        """
        When a connection is accepted to an IPv6 address, an L{IPv6Address} is
        passed to the factory's C{buildProtocol} method giving the peer's
        address.
        """
        interface = '::1'
        client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
        observedAddress = self._buildProtocolAddressTest(client, interface)
        # getsockname() on an IPv6 socket returns a 4-tuple; only the
        # (host, port) prefix is meaningful to IPv6Address.
        self.assertEqual(
            IPv6Address('TCP', *client.getsockname()[:2]), observedAddress)
    if ipv6Skip:
        test_buildProtocolIPv6Address.skip = ipv6Skip
    def test_buildProtocolIPv6AddressScopeID(self):
        """
        When a connection is accepted to a link-local IPv6 address, an
        L{IPv6Address} is passed to the factory's C{buildProtocol} method
        giving the peer's address, including a scope identifier.
        """
        interface = getLinkLocalIPv6Address()
        client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
        observedAddress = self._buildProtocolAddressTest(client, interface)
        self.assertEqual(
            IPv6Address('TCP', *client.getsockname()[:2]), observedAddress)
    if ipv6Skip:
        test_buildProtocolIPv6AddressScopeID.skip = ipv6Skip
    def _serverGetConnectionAddressTest(self, client, interface, which):
        """
        Connect C{client} to a server listening on C{interface} started with
        L{IReactorTCP.listenTCP} and return the address returned by one of the
        server transport's address lookup methods, C{getHost} or C{getPeer}.
        @param client: A C{SOCK_STREAM} L{socket.socket} created with an address
            family such that it will be able to connect to a server listening on
            C{interface}.
        @param interface: A C{str} giving an address for a server to listen on.
            This should almost certainly be the loopback address for some
            address family supported by L{IReactorTCP.listenTCP}.
        @param which: A C{str} equal to either C{"getHost"} or C{"getPeer"}
            determining which address will be returned.
        @return: Whatever object, probably an L{IAddress} provider, is returned
            from the method indicated by C{which}.
        """
        class ObserveAddress(Protocol):
            def makeConnection(self, transport):
                reactor.stop()
                self.factory.address = getattr(transport, which)()
        reactor = self.buildReactor()
        factory = ServerFactory()
        factory.protocol = ObserveAddress
        port = self.getListeningPort(reactor, factory, 0, interface)
        client.setblocking(False)
        try:
            connect(client, (port.getHost().host, port.getHost().port))
        except socket.error as e:
            # A non-blocking connect is expected to report "in progress";
            # anything else is a real failure.
            errnum, message = e.args
            self.assertIn(errnum, (errno.EINPROGRESS, errno.EWOULDBLOCK))
        self.runReactor(reactor)
        return factory.address
    def test_serverGetHostOnIPv4(self):
        """
        When a connection is accepted over IPv4, the server
        L{ITransport.getHost} method returns an L{IPv4Address} giving the
        address on which the server accepted the connection.
        """
        interface = '127.0.0.1'
        client = createTestSocket(self, socket.AF_INET, socket.SOCK_STREAM)
        hostAddress = self._serverGetConnectionAddressTest(
            client, interface, 'getHost')
        self.assertEqual(
            IPv4Address('TCP', *client.getpeername()), hostAddress)
    def test_serverGetHostOnIPv6(self):
        """
        When a connection is accepted over IPv6, the server
        L{ITransport.getHost} method returns an L{IPv6Address} giving the
        address on which the server accepted the connection.
        """
        interface = '::1'
        client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
        hostAddress = self._serverGetConnectionAddressTest(
            client, interface, 'getHost')
        self.assertEqual(
            IPv6Address('TCP', *client.getpeername()[:2]), hostAddress)
    if ipv6Skip:
        test_serverGetHostOnIPv6.skip = ipv6Skip
    def test_serverGetHostOnIPv6ScopeID(self):
        """
        When a connection is accepted over IPv6, the server
        L{ITransport.getHost} method returns an L{IPv6Address} giving the
        address on which the server accepted the connection, including the scope
        identifier.
        """
        interface = getLinkLocalIPv6Address()
        client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
        hostAddress = self._serverGetConnectionAddressTest(
            client, interface, 'getHost')
        self.assertEqual(
            IPv6Address('TCP', *client.getpeername()[:2]), hostAddress)
    if ipv6Skip:
        test_serverGetHostOnIPv6ScopeID.skip = ipv6Skip
    def test_serverGetPeerOnIPv4(self):
        """
        When a connection is accepted over IPv4, the server
        L{ITransport.getPeer} method returns an L{IPv4Address} giving the
        address of the remote end of the connection.
        """
        interface = '127.0.0.1'
        client = createTestSocket(self, socket.AF_INET, socket.SOCK_STREAM)
        peerAddress = self._serverGetConnectionAddressTest(
            client, interface, 'getPeer')
        self.assertEqual(
            IPv4Address('TCP', *client.getsockname()), peerAddress)
    def test_serverGetPeerOnIPv6(self):
        """
        When a connection is accepted over IPv6, the server
        L{ITransport.getPeer} method returns an L{IPv6Address} giving the
        address on the remote end of the connection.
        """
        interface = '::1'
        client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
        peerAddress = self._serverGetConnectionAddressTest(
            client, interface, 'getPeer')
        self.assertEqual(
            IPv6Address('TCP', *client.getsockname()[:2]), peerAddress)
    if ipv6Skip:
        test_serverGetPeerOnIPv6.skip = ipv6Skip
    def test_serverGetPeerOnIPv6ScopeID(self):
        """
        When a connection is accepted over IPv6, the server
        L{ITransport.getPeer} method returns an L{IPv6Address} giving the
        address on the remote end of the connection, including the scope
        identifier.
        """
        interface = getLinkLocalIPv6Address()
        client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
        peerAddress = self._serverGetConnectionAddressTest(
            client, interface, 'getPeer')
        self.assertEqual(
            IPv6Address('TCP', *client.getsockname()[:2]), peerAddress)
    if ipv6Skip:
        test_serverGetPeerOnIPv6ScopeID.skip = ipv6Skip
class TCPPortTestsBuilder(ReactorBuilder, ListenTCPMixin, TCPPortTestsMixin,
                          ObjectModelIntegrationMixin,
                          StreamTransportTestsMixin):
    """
    Run the TCP port test suites against ports created with
    L{IReactorTCP.listenTCP}.
    """
    pass
class TCPFDPortTestsBuilder(ReactorBuilder, SocketTCPMixin, TCPPortTestsMixin,
                            ObjectModelIntegrationMixin,
                            StreamTransportTestsMixin):
    """
    Run the TCP port test suites against ports adopted from existing file
    descriptors via L{IReactorSocket.adoptStreamPort}.
    """
    pass
class StopStartReadingProtocol(Protocol):
    """
    Protocol that pauses and resumes the transport a few times
    """
    def connectionMade(self):
        self.data = b''
        self.pauseResumeProducing(3)
    def pauseResumeProducing(self, counter):
        """
        Toggle the transport's read state, then schedule another toggle until
        the counter runs out, at which point fire the factory's C{ready}
        Deferred.
        """
        self.transport.pauseProducing()
        self.transport.resumeProducing()
        if not counter:
            self.factory.reactor.callLater(
                0, self.factory.ready.callback, self)
        else:
            self.factory.reactor.callLater(
                0, self.pauseResumeProducing, counter - 1)
    def dataReceived(self, data):
        log.msg('got data', len(data))
        self.data += data
        # Fire the factory's stop Deferred once the full payload has arrived.
        if len(self.data) == 4 * 4096:
            self.factory.stop.callback(self.data)
class TCPConnectionTestsBuilder(ReactorBuilder):
    """
    Builder defining tests relating to L{twisted.internet.tcp.Connection}.
    """
    requiredInterfaces = (IReactorTCP,)
    def test_stopStartReading(self):
        """
        This test verifies transport socket read state after multiple
        pause/resumeProducing calls.
        """
        sf = ServerFactory()
        reactor = sf.reactor = self.buildReactor()
        skippedReactors = ["Glib2Reactor", "Gtk2Reactor"]
        reactorClassName = reactor.__class__.__name__
        if reactorClassName in skippedReactors and platform.isWindows():
            raise SkipTest(
                "This test is broken on gtk/glib under Windows.")
        sf.protocol = StopStartReadingProtocol
        sf.ready = Deferred()
        sf.stop = Deferred()
        p = reactor.listenTCP(0, sf)
        port = p.getHost().port
        def proceed(protos, port):
            """
            Send several IOCPReactor's buffers' worth of data.
            """
            self.assertTrue(protos[0])
            self.assertTrue(protos[1])
            # DeferredList results are (success, value) pairs; keep just the
            # protocol instances.
            protos = protos[0][1], protos[1][1]
            protos[0].transport.write(b'x' * (2 * 4096) + b'y' * (2 * 4096))
            return (sf.stop.addCallback(cleanup, protos, port)
                    .addCallback(lambda ign: reactor.stop()))
        def cleanup(data, protos, port):
            """
            Make sure IOCPReactor didn't start several WSARecv operations
            that clobbered each other's results.
            """
            self.assertEqual(data, b'x'*(2*4096) + b'y'*(2*4096),
                             'did not get the right data')
            return DeferredList([
                maybeDeferred(protos[0].transport.loseConnection),
                maybeDeferred(protos[1].transport.loseConnection),
                maybeDeferred(port.stopListening)])
        cc = TCP4ClientEndpoint(reactor, '127.0.0.1', port)
        cf = ClientFactory()
        cf.protocol = Protocol
        d = DeferredList([cc.connect(cf), sf.ready]).addCallback(proceed, p)
        d.addErrback(log.err)
        self.runReactor(reactor)
    def test_connectionLostAfterPausedTransport(self):
        """
        Alice connects to Bob. Alice writes some bytes and then shuts down the
        connection. Bob receives the bytes from the connection and then pauses
        the transport object. Shortly afterwards Bob resumes the transport
        object. At that point, Bob is notified that the connection has been
        closed.
        This is no problem for most reactors. The underlying event notification
        API will probably just remind them that the connection has been closed.
        It is a little tricky for win32eventreactor (MsgWaitForMultipleObjects).
        MsgWaitForMultipleObjects will only deliver the close notification once.
        The reactor needs to remember that notification until Bob resumes the
        transport.
        """
        class Pauser(ConnectableProtocol):
            def __init__(self):
                self.events = []
            def dataReceived(self, bytes):
                self.events.append("paused")
                self.transport.pauseProducing()
                self.reactor.callLater(0, self.resume)
            def resume(self):
                self.events.append("resumed")
                self.transport.resumeProducing()
            def connectionLost(self, reason):
                # This is the event you have been waiting for.
                self.events.append("lost")
                ConnectableProtocol.connectionLost(self, reason)
        class Client(ConnectableProtocol):
            def connectionMade(self):
                self.transport.write(b"some bytes for you")
                self.transport.loseConnection()
        pauser = Pauser()
        runProtocolsWithReactor(self, pauser, Client(), TCPCreator())
        self.assertEqual(pauser.events, ["paused", "resumed", "lost"])
    def test_doubleHalfClose(self):
        """
        If one side half-closes its connection, and then the other side of the
        connection calls C{loseWriteConnection}, and then C{loseConnection} in
        {writeConnectionLost}, the connection is closed correctly.
        This rather obscure case used to fail (see ticket #3037).
        """
        @implementer(IHalfCloseableProtocol)
        class ListenerProtocol(ConnectableProtocol):
            def readConnectionLost(self):
                self.transport.loseWriteConnection()
            def writeConnectionLost(self):
                self.transport.loseConnection()
        class Client(ConnectableProtocol):
            def connectionMade(self):
                self.transport.loseConnection()
        # If test fails, reactor won't stop and we'll hit timeout:
        runProtocolsWithReactor(
            self, ListenerProtocol(), Client(), TCPCreator())
class WriteSequenceTestsMixin(object):
    """
    Test for L{twisted.internet.abstract.FileDescriptor.writeSequence}.
    """
    requiredInterfaces = (IReactorTCP,)
    def setWriteBufferSize(self, transport, value):
        """
        Set the write buffer size for the given transport, managing possible
        differences (ie, IOCP). Bug #4322 should remove the need of that hack.
        """
        # IOCP transports expose writeBufferSize; the default transports use
        # bufferSize instead.
        if getattr(transport, "writeBufferSize", None) is not None:
            transport.writeBufferSize = value
        else:
            transport.bufferSize = value
    def test_writeSequeceWithoutWrite(self):
        """
        C{writeSequence} sends the data even if C{write} hasn't been called.
        """
        def connected(protocols):
            client, server, port = protocols
            def dataReceived(data):
                log.msg("data received: %r" % data)
                self.assertEqual(data, b"Some sequence splitted")
                client.transport.loseConnection()
            server.dataReceived = dataReceived
            client.transport.writeSequence([b"Some ", b"sequence ", b"splitted"])
        reactor = self.buildReactor()
        d = self.getConnectedClientAndServer(reactor, "127.0.0.1",
                                             socket.AF_INET)
        d.addCallback(connected)
        d.addErrback(log.err)
        self.runReactor(reactor)
    def test_writeSequenceWithUnicodeRaisesException(self):
        """
        C{writeSequence} with an element in the sequence of type unicode raises
        C{TypeError}.
        """
        def connected(protocols):
            client, server, port = protocols
            exc = self.assertRaises(
                TypeError,
                server.transport.writeSequence, [u"Unicode is not kosher"])
            self.assertEqual(str(exc), "Data must not be unicode")
            server.transport.loseConnection()
        reactor = self.buildReactor()
        d = self.getConnectedClientAndServer(reactor, "127.0.0.1",
                                             socket.AF_INET)
        d.addCallback(connected)
        d.addErrback(log.err)
        self.runReactor(reactor)
    def test_streamingProducer(self):
        """
        C{writeSequence} pauses its streaming producer if too much data is
        buffered, and then resumes it.
        """
        @implementer(IPushProducer)
        class SaveActionProducer(object):
            client = None
            server = None
            def __init__(self):
                self.actions = []
            def pauseProducing(self):
                self.actions.append("pause")
            def resumeProducing(self):
                self.actions.append("resume")
                # Unregister the producer so the connection can close
                self.client.transport.unregisterProducer()
                # This is why the code below waits for the server connection
                # first - so we have it to close here. We close the server
                # side because win32evenreactor cannot reliably observe us
                # closing the client side (#5285).
                self.server.transport.loseConnection()
            def stopProducing(self):
                self.actions.append("stop")
        producer = SaveActionProducer()
        def connected(protocols):
            client, server = protocols[:2]
            producer.client = client
            producer.server = server
            # Register a streaming producer and verify that it gets paused
            # after it writes more than the local send buffer can hold.
            client.transport.registerProducer(producer, True)
            self.assertEqual(producer.actions, [])
            self.setWriteBufferSize(client.transport, 500)
            client.transport.writeSequence([b"x" * 50] * 20)
            self.assertEqual(producer.actions, ["pause"])
        reactor = self.buildReactor()
        d = self.getConnectedClientAndServer(reactor, "127.0.0.1",
                                             socket.AF_INET)
        d.addCallback(connected)
        d.addErrback(log.err)
        self.runReactor(reactor)
        # After the send buffer gets a chance to empty out a bit, the producer
        # should be resumed.
        self.assertEqual(producer.actions, ["pause", "resume"])
    def test_nonStreamingProducer(self):
        """
        C{writeSequence} pauses its producer if too much data is buffered only
        if this is a streaming producer.
        """
        test = self
        @implementer(IPullProducer)
        class SaveActionProducer(object):
            client = None
            def __init__(self):
                self.actions = []
            def resumeProducing(self):
                self.actions.append("resume")
                if self.actions.count("resume") == 2:
                    self.client.transport.stopConsuming()
                else:
                    # Write enough to exceed the send buffer; a pull producer
                    # must not be paused by this.
                    test.setWriteBufferSize(self.client.transport, 500)
                    self.client.transport.writeSequence([b"x" * 50] * 20)
            def stopProducing(self):
                self.actions.append("stop")
        producer = SaveActionProducer()
        def connected(protocols):
            client = protocols[0]
            producer.client = client
            # Register a non-streaming producer and verify that it is resumed
            # immediately.
            client.transport.registerProducer(producer, False)
            self.assertEqual(producer.actions, ["resume"])
        reactor = self.buildReactor()
        d = self.getConnectedClientAndServer(reactor, "127.0.0.1",
                                             socket.AF_INET)
        d.addCallback(connected)
        d.addErrback(log.err)
        self.runReactor(reactor)
        # After the local send buffer empties out, the producer should be
        # resumed again.
        self.assertEqual(producer.actions, ["resume", "resume"])
class TCPTransportServerAddressTestMixin(object):
    """
    Test mixin for TCP server address building and log prefix.
    """
    def getConnectedClientAndServer(self, reactor, interface, addressFamily):
        """
        Helper method returning a L{Deferred} firing with a tuple of a client
        protocol, a server protocol, and a running TCP port.
        """
        raise NotImplementedError()
    def _testServerAddress(self, interface, addressFamily, addressClass):
        """
        Helper method to test TCP server addresses on either IPv4 or IPv6.

        @param interface: The loopback or link-local interface to listen on.
        @param addressFamily: The C{AF_*} socket address family to use.
        @param addressClass: The L{IAddress} implementation (L{IPv4Address} or
            L{IPv6Address}) the recorded peer address must be an instance of.
        """
        def connected(protocols):
            client, server, port = protocols
            try:
                self.assertEqual(
                    "<AccumulatingProtocol #%s on %s>" %
                    (server.transport.sessionno, port.getHost().port),
                    str(server.transport))
                self.assertEqual(
                    "AccumulatingProtocol,%s,%s" %
                    (server.transport.sessionno, interface),
                    server.transport.logstr)
                [peerAddress] = server.factory.peerAddresses
                self.assertIsInstance(peerAddress, addressClass)
                self.assertEqual('TCP', peerAddress.type)
                self.assertEqual(interface, peerAddress.host)
            finally:
                # Be certain to drop the connection so the test completes.
                server.transport.loseConnection()
        reactor = self.buildReactor()
        d = self.getConnectedClientAndServer(reactor, interface, addressFamily)
        d.addCallback(connected)
        d.addErrback(log.err)
        self.runReactor(reactor)
    def test_serverAddressTCP4(self):
        """
        L{Server} instances have a string representation indicating on which
        port they're running, and the connected address is stored on the
        C{peerAddresses} attribute of the factory.
        """
        return self._testServerAddress("127.0.0.1", socket.AF_INET,
                                       IPv4Address)
    def test_serverAddressTCP6(self):
        """
        IPv6 L{Server} instances have a string representation indicating on
        which port they're running, and the connected address is stored on the
        C{peerAddresses} attribute of the factory.
        """
        return self._testServerAddress(getLinkLocalIPv6Address(),
                                       socket.AF_INET6, IPv6Address)
    if ipv6Skip:
        test_serverAddressTCP6.skip = ipv6Skip
class TCPTransportTestsBuilder(TCPTransportServerAddressTestMixin,
                               WriteSequenceTestsMixin, ReactorBuilder):
    """
    Test standard L{ITCPTransport}s built with C{listenTCP} and C{connectTCP}.
    """
    def getConnectedClientAndServer(self, reactor, interface, addressFamily):
        """
        Return a L{Deferred} firing with a L{MyClientFactory} and
        L{MyServerFactory} connected pair, and the listening C{Port}.
        """
        serverFactory = MyServerFactory()
        serverFactory.protocolConnectionMade = Deferred()
        serverFactory.protocolConnectionLost = Deferred()
        clientFactory = MyClientFactory()
        clientFactory.protocolConnectionMade = Deferred()
        clientFactory.protocolConnectionLost = Deferred()
        listeningPort = reactor.listenTCP(0, serverFactory,
                                          interface=interface)
        def stopReactor(passthrough):
            # Once both sides have disconnected, shut the reactor down so the
            # test run can complete.
            reactor.stop()
            return passthrough
        gatherResults([clientFactory.protocolConnectionLost,
                       serverFactory.protocolConnectionLost]
                      ).addBoth(stopReactor)
        result = Deferred()
        def connectionsMade(protocols):
            clientProto, serverProto = protocols
            log.msg("client connected %s" % clientProto)
            log.msg("server connected %s" % serverProto)
            result.callback((clientProto, serverProto, listeningPort))
        gatherResults([clientFactory.protocolConnectionMade,
                       serverFactory.protocolConnectionMade]
                      ).addCallback(connectionsMade)
        reactor.connectTCP(interface, listeningPort.getHost().port,
                           clientFactory)
        return result
class AdoptStreamConnectionTestsBuilder(TCPTransportServerAddressTestMixin,
                                        WriteSequenceTestsMixin,
                                        ReactorBuilder):
    """
    Test server transports built using C{adoptStreamConnection}.
    """
    requiredInterfaces = (IReactorFDSet, IReactorSocket)
    def getConnectedClientAndServer(self, reactor, interface, addressFamily):
        """
        Return a L{Deferred} firing with a L{MyClientFactory} and
        L{MyServerFactory} connected pair, and the listening C{Port}. The
        particularity is that the server protocol has been obtained after doing
        a C{adoptStreamConnection} against the original server connection.
        """
        firstServer = MyServerFactory()
        firstServer.protocolConnectionMade = Deferred()
        server = MyServerFactory()
        server.protocolConnectionMade = Deferred()
        server.protocolConnectionLost = Deferred()
        client = MyClientFactory()
        client.protocolConnectionMade = Deferred()
        client.protocolConnectionLost = Deferred()
        port = reactor.listenTCP(0, firstServer, interface=interface)
        def firtServerConnected(proto):
            # Detach the accepted connection from the reactor, then re-adopt
            # its file descriptor under the second server factory.
            reactor.removeReader(proto.transport)
            reactor.removeWriter(proto.transport)
            reactor.adoptStreamConnection(
                proto.transport.fileno(), addressFamily, server)
        firstServer.protocolConnectionMade.addCallback(firtServerConnected)
        lostDeferred = gatherResults([client.protocolConnectionLost,
                                      server.protocolConnectionLost])
        def stop(result):
            if reactor.running:
                reactor.stop()
            return result
        lostDeferred.addBoth(stop)
        deferred = Deferred()
        deferred.addErrback(stop)
        startDeferred = gatherResults([client.protocolConnectionMade,
                                       server.protocolConnectionMade])
        def start(protocols):
            client, server = protocols
            log.msg("client connected %s" % client)
            log.msg("server connected %s" % server)
            deferred.callback((client, server, port))
        startDeferred.addCallback(start)
        reactor.connectTCP(interface, port.getHost().port, client)
        return deferred
# Generate the concrete per-reactor TestCase classes from each builder and
# install them at module scope so trial's test discovery can find them.
globals().update(TCP4ClientTestsBuilder.makeTestCaseClasses())
globals().update(TCP6ClientTestsBuilder.makeTestCaseClasses())
globals().update(TCPPortTestsBuilder.makeTestCaseClasses())
globals().update(TCPFDPortTestsBuilder.makeTestCaseClasses())
globals().update(TCPConnectionTestsBuilder.makeTestCaseClasses())
globals().update(TCP4ConnectorTestsBuilder.makeTestCaseClasses())
globals().update(TCP6ConnectorTestsBuilder.makeTestCaseClasses())
globals().update(TCPTransportTestsBuilder.makeTestCaseClasses())
globals().update(AdoptStreamConnectionTestsBuilder.makeTestCaseClasses())
class ServerAbortsTwice(ConnectableProtocol):
    """
    Server which aborts the connection twice in a row when data arrives.
    """
    def dataReceived(self, data):
        # The second abort must be a harmless no-op.
        for _ in range(2):
            self.transport.abortConnection()
class ServerAbortsThenLoses(ConnectableProtocol):
    """
    Server which aborts the connection and then also requests a clean close.
    """
    def dataReceived(self, data):
        transport = self.transport
        transport.abortConnection()
        # loseConnection on an already-aborted transport must be harmless.
        transport.loseConnection()
class AbortServerWritingProtocol(ConnectableProtocol):
    """
    Protocol that writes a greeting as soon as the connection exists.
    """
    def connectionMade(self):
        """
        Tell the client that the connection is set up and it's time to abort.
        """
        transport = self.transport
        transport.write(b"ready")
class ReadAbortServerProtocol(AbortServerWritingProtocol):
    """
    Server that should never receive any data, except 'X's which are written
    by the other side of the connection before abortConnection, and so might
    possibly arrive.
    """
    def dataReceived(self, data):
        # Stripping 'X' bytes leaves something only when unexpected bytes
        # arrived (empty result iff the payload was all 'X's).
        if data.strip(b'X'):
            raise Exception("Unexpectedly received data.")
class NoReadServer(ConnectableProtocol):
    """
    Server which stops reading the moment the connection is set up.
    This simulates a lost connection that will cause the other side to time
    out, and therefore call abortConnection().
    """
    def connectionMade(self):
        # Never read anything; the peer's writes will eventually back up.
        self.transport.stopReading()
class EventualNoReadServer(ConnectableProtocol):
    """
    Like NoReadServer, except we wait until some bytes have been delivered
    before stopping reading. This means TLS handshake has finished, where
    applicable.
    """
    # Whether any bytes have been received yet.
    gotData = False
    # Whether stopReading() has already been issued.
    stoppedReading = False
    def dataReceived(self, data):
        if not self.gotData:
            self.gotData = True
            # Register as a non-streaming producer so resumeProducing runs
            # once the write below has been flushed.
            self.transport.registerProducer(self, False)
            self.transport.write(b"hello")
    def resumeProducing(self):
        if self.stoppedReading:
            return
        self.stoppedReading = True
        # We've written out the data:
        self.transport.stopReading()
    def pauseProducing(self):
        pass
    def stopProducing(self):
        pass
class BaseAbortingClient(ConnectableProtocol):
    """
    Base class for abort-testing clients.
    """
    inReactorMethod = False
    def connectionLost(self, reason):
        """
        Assert that the disconnection notification is not delivered while we
        are still inside another reactor callback.
        """
        if self.inReactorMethod:
            raise RuntimeError("BUG: connectionLost was called re-entrantly!")
        ConnectableProtocol.connectionLost(self, reason)
class WritingButNotAbortingClient(BaseAbortingClient):
    """
    Client which writes some data but never aborts; the server side is
    expected to do the aborting.
    """
    def connectionMade(self):
        self.transport.write(b"hello")
class AbortingClient(BaseAbortingClient):
    """
    Call abortConnection() after writing some data.
    """
    def dataReceived(self, data):
        """
        Some data was received, so the connection is set up.
        """
        # Flag re-entrancy so connectionLost can assert it is not delivered
        # while we are still inside this reactor callback.
        self.inReactorMethod = True
        self.writeAndAbort()
        self.inReactorMethod = False
    def writeAndAbort(self):
        # X is written before abortConnection, and so there is a chance it
        # might arrive. Y is written after, and so no Ys should ever be
        # delivered:
        self.transport.write(b"X" * 10000)
        self.transport.abortConnection()
        self.transport.write(b"Y" * 10000)
class AbortingTwiceClient(AbortingClient):
    """
    Client which aborts a second time after the base class already aborted.
    """
    def writeAndAbort(self):
        AbortingClient.writeAndAbort(self)
        # Aborting an already-aborted transport must be harmless.
        self.transport.abortConnection()
class AbortingThenLosingClient(AbortingClient):
    """
    Client which requests a clean close right after aborting.
    """
    def writeAndAbort(self):
        AbortingClient.writeAndAbort(self)
        # loseConnection after abortConnection must be harmless.
        self.transport.loseConnection()
class ProducerAbortingClient(ConnectableProtocol):
    """
    Call abortConnection from doWrite, via resumeProducing.
    """
    # NOTE(review): the initial True looks odd, but resumeProducing always
    # runs (during registerProducer for a non-streaming producer) before
    # connectionLost can fire, leaving this False by then — confirm.
    inReactorMethod = True
    producerStopped = False
    def write(self):
        # A large write, so the transport has buffered data to flush when
        # resumeProducing fires.
        self.transport.write(b"lalala" * 127000)
        self.inRegisterProducer = True
        self.transport.registerProducer(self, False)
        self.inRegisterProducer = False
    def connectionMade(self):
        self.write()
    def resumeProducing(self):
        self.inReactorMethod = True
        # Skip the abort for the resumeProducing that happens synchronously
        # inside registerProducer; only abort on a later resume.
        if not self.inRegisterProducer:
            self.transport.abortConnection()
        self.inReactorMethod = False
    def stopProducing(self):
        self.producerStopped = True
    def connectionLost(self, reason):
        if not self.producerStopped:
            raise RuntimeError("BUG: stopProducing() was never called.")
        if self.inReactorMethod:
            raise RuntimeError("BUG: connectionLost called re-entrantly!")
        ConnectableProtocol.connectionLost(self, reason)
class StreamingProducerClient(ConnectableProtocol):
    """
    Call abortConnection() when the other side has stopped reading.
    In particular, we want to call abortConnection() only once our local
    socket hits a state where it is no longer writeable. This helps emulate
    the most common use case for abortConnection(), closing a connection after
    a timeout, with write buffers being full.
    Since it's very difficult to know when this actually happens, we just
    write a lot of data, and assume at that point no more writes will happen.
    """
    paused = False
    extraWrites = 0
    inReactorMethod = False
    def connectionMade(self):
        self.write()
    def write(self):
        """
        Write large amount to transport, then wait for a while for buffers to
        fill up.
        """
        self.transport.registerProducer(self, True)
        for i in range(100):
            self.transport.write(b"1234567890" * 32000)
    def resumeProducing(self):
        self.paused = False
    def stopProducing(self):
        pass
    def pauseProducing(self):
        """
        Called when local buffer fills up.
        The goal is to hit the point where the local file descriptor is not
        writeable (or the moral equivalent). The fact that pauseProducing has
        been called is not sufficient, since that can happen when Twisted's
        buffers fill up but OS hasn't gotten any writes yet. We want to be as
        close as possible to every buffer (including OS buffers) being full.
        So, we wait a bit more after this for Twisted to write out a few
        chunks, then abortConnection.
        """
        if self.paused:
            return
        self.paused = True
        # The amount we wait is arbitrary, we just want to make sure some
        # writes have happened and outgoing OS buffers filled up -- see
        # http://twistedmatrix.com/trac/ticket/5303 for details:
        self.reactor.callLater(0.01, self.doAbort)
    def doAbort(self):
        if not self.paused:
            # Fixed garbled wording of this sanity-check message
            # ("paused a this point").
            log.err(RuntimeError("BUG: We should be paused at this point."))
        self.inReactorMethod = True
        self.transport.abortConnection()
        self.inReactorMethod = False
    def connectionLost(self, reason):
        # Tell server to start reading again so it knows to go away:
        self.otherProtocol.transport.startReading()
        ConnectableProtocol.connectionLost(self, reason)
class StreamingProducerClientLater(StreamingProducerClient):
    """
    Trigger the full-buffer abortConnection() only after some bytes have
    been exchanged with the peer.
    """
    def connectionMade(self):
        self.transport.write(b"hello")
        self.gotData = False
    def dataReceived(self, data):
        # Guard clause: only the first delivery kicks off the big write.
        if self.gotData:
            return
        self.gotData = True
        self.write()
class ProducerAbortingClientLater(ProducerAbortingClient):
    """
    Call abortConnection from doWrite, via resumeProducing, but only once
    some bytes have already been exchanged — so any SSL handshake has had a
    chance to complete first.
    """
    def connectionMade(self):
        # Deliberately do nothing yet (overrides the base class, which
        # writes immediately); wait for the server's bytes instead.
        pass
    def dataReceived(self, data):
        self.write()
class DataReceivedRaisingClient(AbortingClient):
    """
    Abort the connection from dataReceived and then blow up, so the test can
    check the exception is not masked by the abort.
    """
    def dataReceived(self, data):
        self.transport.abortConnection()
        raise ZeroDivisionError("ONO")
class ResumeThrowsClient(ProducerAbortingClient):
    """
    Call abortConnection() and throw exception from resumeProducing().
    """
    def resumeProducing(self):
        if not self.inRegisterProducer:
            self.transport.abortConnection()
            raise ZeroDivisionError("ono!")
    def connectionLost(self, reason):
        # Base class assertion about stopProducing being called isn't valid;
        # if we blew up in resumeProducing, consumers are justified in
        # giving up on the producer and not calling stopProducing.
        ConnectableProtocol.connectionLost(self, reason)
class AbortConnectionMixin(object):
    """
    Unit tests for L{ITransport.abortConnection}.
    """
    # Override in subclasses, should be a EndpointCreator instance:
    endpoints = None
    def runAbortTest(self, clientClass, serverClass,
                     clientConnectionLostReason=None):
        """
        A test runner utility function, which hooks up a matched pair of client
        and server protocols.
        We then run the reactor until both sides have disconnected, and then
        verify that the right exception resulted.
        """
        clientExpectedExceptions = (ConnectionAborted, ConnectionLost)
        serverExpectedExceptions = (ConnectionLost, ConnectionDone)
        # In TLS tests we may get SSL.Error instead of ConnectionLost,
        # since we're trashing the TLS protocol layer.
        if useSSL:
            clientExpectedExceptions = clientExpectedExceptions + (SSL.Error,)
            serverExpectedExceptions = serverExpectedExceptions + (SSL.Error,)
        client = clientClass()
        server = serverClass()
        client.otherProtocol = server
        server.otherProtocol = client
        reactor = runProtocolsWithReactor(self, server, client, self.endpoints)
        # Make sure everything was shutdown correctly:
        self.assertEqual(reactor.removeAll(), [])
        # The reactor always has a timeout added in runReactor():
        delayedCalls = reactor.getDelayedCalls()
        # Build a real list for the failure message; on Python 3 a bare
        # map() object would render uselessly as "<map object ...>".
        self.assertEqual(len(delayedCalls), 1,
                         [str(call) for call in delayedCalls])
        if clientConnectionLostReason is not None:
            self.assertIsInstance(
                client.disconnectReason.value,
                (clientConnectionLostReason,) + clientExpectedExceptions)
        else:
            self.assertIsInstance(client.disconnectReason.value,
                                  clientExpectedExceptions)
        self.assertIsInstance(server.disconnectReason.value, serverExpectedExceptions)
    def test_dataReceivedAbort(self):
        """
        abortConnection() is called in dataReceived. The protocol should be
        disconnected, but connectionLost should not be called re-entrantly.
        """
        return self.runAbortTest(AbortingClient, ReadAbortServerProtocol)
    def test_clientAbortsConnectionTwice(self):
        """
        abortConnection() is called twice by client.
        No exception should be thrown, and the connection will be closed.
        """
        return self.runAbortTest(AbortingTwiceClient, ReadAbortServerProtocol)
    def test_clientAbortsConnectionThenLosesConnection(self):
        """
        Client calls abortConnection(), followed by loseConnection().
        No exception should be thrown, and the connection will be closed.
        """
        return self.runAbortTest(AbortingThenLosingClient,
                                 ReadAbortServerProtocol)
    def test_serverAbortsConnectionTwice(self):
        """
        abortConnection() is called twice by server.
        No exception should be thrown, and the connection will be closed.
        """
        return self.runAbortTest(WritingButNotAbortingClient, ServerAbortsTwice,
                                 clientConnectionLostReason=ConnectionLost)
    def test_serverAbortsConnectionThenLosesConnection(self):
        """
        Server calls abortConnection(), followed by loseConnection().
        No exception should be thrown, and the connection will be closed.
        """
        return self.runAbortTest(WritingButNotAbortingClient,
                                 ServerAbortsThenLoses,
                                 clientConnectionLostReason=ConnectionLost)
    def test_resumeProducingAbort(self):
        """
        abortConnection() is called in resumeProducing, before any bytes have
        been exchanged. The protocol should be disconnected, but
        connectionLost should not be called re-entrantly.
        """
        self.runAbortTest(ProducerAbortingClient,
                          ConnectableProtocol)
    def test_resumeProducingAbortLater(self):
        """
        abortConnection() is called in resumeProducing, after some
        bytes have been exchanged. The protocol should be disconnected.
        """
        return self.runAbortTest(ProducerAbortingClientLater,
                                 AbortServerWritingProtocol)
    def test_fullWriteBuffer(self):
        """
        abortConnection() triggered by the write buffer being full.
        In particular, the server side stops reading. This is supposed
        to simulate a realistic timeout scenario where the client
        notices the server is no longer accepting data.
        The protocol should be disconnected, but connectionLost should not be
        called re-entrantly.
        """
        self.runAbortTest(StreamingProducerClient,
                          NoReadServer)
    def test_fullWriteBufferAfterByteExchange(self):
        """
        abortConnection() is triggered by a write buffer being full.
        However, this buffer is filled after some bytes have been exchanged,
        allowing a TLS handshake if we're testing TLS. The connection will
        then be lost.
        """
        return self.runAbortTest(StreamingProducerClientLater,
                                 EventualNoReadServer)
    def test_dataReceivedThrows(self):
        """
        dataReceived calls abortConnection(), and then raises an exception.
        The connection will be lost, with the thrown exception
        (C{ZeroDivisionError}) as the reason on the client. The idea here is
        that bugs should not be masked by abortConnection, in particular
        unexpected exceptions.
        """
        self.runAbortTest(DataReceivedRaisingClient,
                          AbortServerWritingProtocol,
                          clientConnectionLostReason=ZeroDivisionError)
        errors = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEqual(len(errors), 1)
    def test_resumeProducingThrows(self):
        """
        resumeProducing calls abortConnection(), and then raises an exception.
        The connection will be lost, with the thrown exception
        (C{ZeroDivisionError}) as the reason on the client. The idea here is
        that bugs should not be masked by abortConnection, in particular
        unexpected exceptions.
        """
        self.runAbortTest(ResumeThrowsClient,
                          ConnectableProtocol,
                          clientConnectionLostReason=ZeroDivisionError)
        errors = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEqual(len(errors), 1)
class AbortConnectionTestCase(ReactorBuilder, AbortConnectionMixin):
    """
    The L{AbortConnectionMixin} tests, run over plain TCP endpoints.
    """
    requiredInterfaces = (IReactorTCP,)
    endpoints = TCPCreator()
# Install the per-reactor abortConnection TestCase classes at module scope.
globals().update(AbortConnectionTestCase.makeTestCaseClasses())
class SimpleUtilityTestCase(TestCase):
    """
    Simple, direct tests for helpers within L{twisted.internet.tcp}.
    """
    if ipv6Skip:
        skip = ipv6Skip
    def test_resolveNumericHost(self):
        """
        L{_resolveIPv6} raises a L{socket.gaierror} (L{socket.EAI_NONAME}) when
        invoked with a non-numeric host. (In other words, it is passing
        L{socket.AI_NUMERICHOST} to L{socket.getaddrinfo} and will not
        accidentally block if it receives bad input.)
        """
        err = self.assertRaises(socket.gaierror, _resolveIPv6, "localhost", 1)
        self.assertEqual(err.args[0], socket.EAI_NONAME)
    def test_resolveNumericService(self):
        """
        L{_resolveIPv6} raises a L{socket.gaierror} (L{socket.EAI_NONAME}) when
        invoked with a non-numeric port. (In other words, it is passing
        L{socket.AI_NUMERICSERV} to L{socket.getaddrinfo} and will not
        accidentally block if it receives bad input.)
        """
        err = self.assertRaises(socket.gaierror, _resolveIPv6, "::1", "http")
        self.assertEqual(err.args[0], socket.EAI_NONAME)
    if platform.isWindows():
        test_resolveNumericService.skip = ("The AI_NUMERICSERV flag is not "
                                           "supported by Microsoft providers.")
        # http://msdn.microsoft.com/en-us/library/windows/desktop/ms738520.aspx
    def test_resolveIPv6(self):
        """
        L{_resolveIPv6} discovers the flow info and scope ID of an IPv6
        address.
        """
        result = _resolveIPv6("::1", 2)
        self.assertEqual(len(result), 4)
        # We can't say anything more useful about these than that they're
        # integers, because the whole point of getaddrinfo is that you can
        # never know a-priori _anything_ about the network interfaces of the
        # computer that you're on and you have to ask it.
        self.assertIsInstance(result[2], int) # flow info
        self.assertIsInstance(result[3], int) # scope id
        # but, luckily, IP presentation format and what it means to be a port
        # number are a little better specified.
        self.assertEqual(result[:2], ("::1", 2))
| 33.543717 | 86 | 0.643169 |
from __future__ import division, absolute_import
__metaclass__ = type
import socket, errno
from zope.interface import implementer
from twisted.python.compat import _PY3
from twisted.python.runtime import platform
from twisted.python.failure import Failure
from twisted.python import log
from twisted.trial.unittest import SkipTest, TestCase
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.internet.error import (
ConnectionLost, UserError, ConnectionRefusedError, ConnectionDone,
ConnectionAborted)
from twisted.internet.interfaces import (
ILoggingContext, IConnector, IReactorFDSet, IReactorSocket, IReactorTCP)
from twisted.internet.address import IPv4Address, IPv6Address
from twisted.internet.defer import (
Deferred, DeferredList, maybeDeferred, gatherResults)
from twisted.internet._endpointspy3 import (
TCP4ServerEndpoint, TCP4ClientEndpoint)
from twisted.internet.protocol import ServerFactory, ClientFactory, Protocol
from twisted.internet.interfaces import (
IPushProducer, IPullProducer, IHalfCloseableProtocol)
from twisted.internet.tcp import Connection, Server, _resolveIPv6
from twisted.internet.test.connectionmixins import (
LogObserverMixin, ConnectionTestsMixin, TCPClientTestsMixin, findFreePort,
ConnectableProtocol, EndpointCreator, runProtocolsWithReactor)
from twisted.internet.test.test_core import ObjectModelIntegrationMixin
from twisted.test.test_tcp import MyClientFactory, MyServerFactory
from twisted.test.test_tcp import ClosingFactory, ClientStartStopFactory
# Feature detection: TLS-dependent tests only run when pyOpenSSL is present.
try:
    from OpenSSL import SSL
except ImportError:
    useSSL = False
else:
    from twisted.internet.ssl import ClientContextFactory
    useSSL = True
# ipv6Skip is a skip-reason string when this platform cannot create an
# AF_INET6 socket, or None when IPv6 is usable.
try:
    socket.socket(socket.AF_INET6, socket.SOCK_STREAM).close()
except socket.error as e:
    ipv6Skip = str(e)
else:
    ipv6Skip = None
# Pick the platform-appropriate helper for enumerating link-local IPv6
# addresses; fall back to "no addresses" if the POSIX helper cannot be
# imported.
if platform.isWindows():
    from twisted.internet.test import _win32ifaces
    getLinkLocalIPv6Addresses = _win32ifaces.win32GetLinkLocalIPv6Addresses
else:
    try:
        from twisted.internet.test import _posixifaces
    except ImportError:
        getLinkLocalIPv6Addresses = lambda: []
    else:
        getLinkLocalIPv6Addresses = _posixifaces.posixGetLinkLocalIPv6Addresses
def getLinkLocalIPv6Address():
    """
    Return the first link-local IPv6 address of this host, or skip the
    calling test if there is none.
    """
    for address in getLinkLocalIPv6Addresses():
        return address
    raise SkipTest("Link local IPv6 address unavailable")
def connect(client, destination):
    """
    Connect the given socket-like object to C{destination}, a (host, port)
    pair.  IPv6 literals (containing ':') and scoped addresses (containing
    '%') are resolved through C{getaddrinfo} first.
    """
    host, port = destination
    if ':' in host or '%' in host:
        address = socket.getaddrinfo(host, port)[0][4]
    else:
        address = host, port
    client.connect(address)
class FakeSocket(object):
    """
    A fake socket for testing: records what is written and always returns
    the same canned data from C{recv}.
    """
    def __init__(self, data):
        self.data = data
        self.sendBuffer = []
    def setblocking(self, blocking):
        self.blocking = blocking
    def recv(self, size):
        # Always return the canned data, regardless of the requested size.
        return self.data
    def send(self, bytes):
        """
        Record the written bytes and pretend they were all sent.
        """
        self.sendBuffer.append(bytes)
        return len(bytes)
    def shutdown(self, how):
        """
        Ignore the request to shut down either side of the connection.
        (Body restored: the method was left without one, which is a
        syntax error.)
        """
    def close(self):
        """
        Ignore the request to close the underlying file descriptor.
        """
    def setsockopt(self, *args):
        """
        Ignore any socket-option setting.
        """
    def fileno(self):
        # A fixed, fake file descriptor.
        return 1
class TestFakeSocket(TestCase):
    """
    Tests for L{FakeSocket}.
    """
    def test_blocking(self):
        # setblocking() simply records the flag.
        skt = FakeSocket(b"someData")
        skt.setblocking(0)
        self.assertEqual(skt.blocking, 0)
    def test_recv(self):
        # recv() returns the canned data regardless of the requested size.
        skt = FakeSocket(b"someData")
        self.assertEqual(skt.recv(10), b"someData")
    def test_send(self):
        """
        send() records the bytes in C{sendBuffer} and reports that all of
        them were written.
        """
        skt = FakeSocket(b"")
        count = skt.send(b"foo")
        self.assertEqual(count, 3)
        self.assertEqual(skt.sendBuffer, [b"foo"])
class FakeProtocol(Protocol):
    """
    A protocol whose C{dataReceived} deliberately returns a non-None value,
    used to provoke the deprecation warning checked by
    C{test_doReadWarningIsRaised}.
    """
    def dataReceived(self, data):
        # Returning anything but None from dataReceived is deprecated.
        return ()
@implementer(IReactorFDSet)
class _FakeFDSetReactor(object):
    """
    An L{IReactorFDSet} implementation which discards all reader/writer
    registrations.
    """
    addReader = addWriter = removeReader = removeWriter = (
        lambda self, desc: None)
class TCPServerTests(TestCase):
    """
    Whitebox tests for the TCP server transport, L{Server}, using a fake
    socket and a fake reactor.
    """
    def setUp(self):
        self.reactor = _FakeFDSetReactor()
        class FakePort(object):
            # The only attribute the Server transport reads from its port.
            _realPortNumber = 3
        self.skt = FakeSocket(b"")
        self.protocol = Protocol()
        self.server = Server(
            self.skt, self.protocol, ("", 0), FakePort(), None, self.reactor)
    def test_writeAfterDisconnect(self):
        """
        A write issued after connectionLost is silently dropped — nothing
        reaches the socket.
        """
        self.server.connectionLost(
            Failure(Exception("Simulated lost connection")))
        self.server.write(b"hello world")
        self.assertEqual(self.skt.sendBuffer, [])
    def test_writeAfteDisconnectAfterTLS(self):
        # Same as above with the TLS code path enabled.
        # NOTE(review): method name is missing an 'r' ("Afte"); renaming
        # would change the discovered test id, so it is left as-is.
        self.server.TLS = True
        self.test_writeAfterDisconnect()
    def test_writeSequenceAfterDisconnect(self):
        """
        A writeSequence issued after connectionLost is silently dropped.
        """
        self.server.connectionLost(
            Failure(Exception("Simulated lost connection")))
        self.server.writeSequence([b"hello world"])
        self.assertEqual(self.skt.sendBuffer, [])
    def test_writeSequenceAfteDisconnectAfterTLS(self):
        # Same as above with the TLS code path enabled.
        self.server.TLS = True
        self.test_writeSequenceAfterDisconnect()
class TCPConnectionTests(TestCase):
    """
    Whitebox tests for L{twisted.internet.tcp.Connection}.
    """
    def test_doReadWarningIsRaised(self):
        """
        A protocol returning a non-None value from C{dataReceived} causes a
        DeprecationWarning to be emitted by C{doRead}.
        """
        skt = FakeSocket(b"someData")
        protocol = FakeProtocol()
        conn = Connection(skt, protocol)
        conn.doRead()
        warnings = self.flushWarnings([FakeProtocol.dataReceived])
        self.assertEqual(warnings[0]['category'], DeprecationWarning)
        self.assertEqual(
            warnings[0]["message"],
            "Returning a value other than None from "
            "twisted.internet.test.test_tcp.FakeProtocol.dataReceived "
            "is deprecated since Twisted 11.0.0.")
        self.assertEqual(len(warnings), 1)
    def test_noTLSBeforeStartTLS(self):
        """
        The C{TLS} attribute of a Connection is False before startTLS.
        """
        skt = FakeSocket(b"")
        protocol = FakeProtocol()
        conn = Connection(skt, protocol)
        self.assertFalse(conn.TLS)
    def test_tlsAfterStartTLS(self):
        """
        The C{TLS} attribute of a Connection is True after startTLS.
        """
        skt = FakeSocket(b"")
        protocol = FakeProtocol()
        conn = Connection(skt, protocol, reactor=_FakeFDSetReactor())
        conn._tlsClientDefault = True
        conn.startTLS(ClientContextFactory(), True)
        self.assertTrue(conn.TLS)
    if not useSSL:
        test_tlsAfterStartTLS.skip = "No SSL support available"
class TCPCreator(EndpointCreator):
    """
    Create IPv4 TCP endpoints for testing client/server connections.
    """
    interface = "127.0.0.1"
    def server(self, reactor):
        # Listen on an ephemeral port on the loopback interface.
        return TCP4ServerEndpoint(reactor, 0, interface=self.interface)
    def client(self, reactor, serverAddress):
        # Connect to whatever port the server endpoint was assigned.
        return TCP4ClientEndpoint(reactor, self.interface, serverAddress.port)
class TCP6Creator(TCPCreator):
    """
    Like L{TCPCreator} but binds to a link-local IPv6 address of this host
    instead of the IPv4 loopback interface.
    """
    def __init__(self):
        self.interface = getLinkLocalIPv6Address()
class TCPClientTestsBase(ReactorBuilder, ConnectionTestsMixin,
                         TCPClientTestsMixin):
    """
    Base for TCP client reactor tests; subclasses select the address family
    via C{family}, C{addressClass}, and C{endpoints}.
    """
    requiredInterfaces = (IReactorTCP,)
    port = 1234
    @property
    def interface(self):
        # Delegate to the endpoint creator so IPv4/IPv6 subclasses agree.
        return self.endpoints.interface
class TCP4ClientTestsBuilder(TCPClientTestsBase):
    """
    Builder for the IPv4 variant of the TCP client tests.
    """
    fakeDomainName = 'some-fake.domain.example.com'
    family = socket.AF_INET
    addressClass = IPv4Address
    endpoints = TCPCreator()
class TCP6ClientTestsBuilder(TCPClientTestsBase):
    """
    Builder for the IPv6 variant of the TCP client tests.
    """
    if ipv6Skip:
        skip = ipv6Skip
    family = socket.AF_INET6
    addressClass = IPv6Address
    def setUp(self):
        # The endpoint creator is built here rather than at class scope so
        # that it is not constructed when the IPv6 tests are being skipped.
        self.endpoints = TCP6Creator()
        # This is used by test_addresses to test the distinction between the
        # resolved name and the name on the socket itself. All the same
        # invariants should hold, but giving back an IPv6 address from a
        # resolver is not something the reactor can handle, so instead, we make
        # it so that the connect call for the IPv6 address test simply uses an
        # address literal.
        self.fakeDomainName = self.endpoints.interface
class TCPConnectorTestsBuilder(ReactorBuilder):
    """
    Tests for the L{IConnector} returned by C{connectTCP}; IPv4/IPv6
    subclasses supply C{interface}, C{family}, and C{addressClass}.
    """
    requiredInterfaces = (IReactorTCP,)
    def test_connectorIdentity(self):
        """
        C{connectTCP} returns an object providing L{IConnector}; its
        destination matches the arguments, and the very same connector is
        passed both to C{startedConnecting} and C{clientConnectionLost}.
        """
        serverFactory = ClosingFactory()
        reactor = self.buildReactor()
        tcpPort = reactor.listenTCP(0, serverFactory, interface=self.interface)
        serverFactory.port = tcpPort
        portNumber = tcpPort.getHost().port
        seenConnectors = []
        seenFailures = []
        clientFactory = ClientStartStopFactory()
        clientFactory.clientConnectionLost = (
            lambda connector, reason: (seenConnectors.append(connector),
                                       seenFailures.append(reason)))
        clientFactory.startedConnecting = seenConnectors.append
        connector = reactor.connectTCP(self.interface, portNumber,
                                       clientFactory)
        self.assertTrue(IConnector.providedBy(connector))
        dest = connector.getDestination()
        self.assertEqual(dest.type, "TCP")
        self.assertEqual(dest.host, self.interface)
        self.assertEqual(dest.port, portNumber)
        clientFactory.whenStopped.addBoth(lambda _: reactor.stop())
        self.runReactor(reactor)
        seenFailures[0].trap(ConnectionDone)
        self.assertEqual(seenConnectors, [connector, connector])
    def test_userFail(self):
        """
        Calling C{stopConnecting} from C{startedConnecting} cancels the
        attempt; the factory fails with L{UserError} as the reason.
        """
        serverFactory = MyServerFactory()
        reactor = self.buildReactor()
        tcpPort = reactor.listenTCP(0, serverFactory, interface=self.interface)
        portNumber = tcpPort.getHost().port
        fatalErrors = []
        def startedConnecting(connector):
            try:
                connector.stopConnecting()
            except Exception:
                fatalErrors.append(Failure())
                reactor.stop()
        clientFactory = ClientStartStopFactory()
        clientFactory.startedConnecting = startedConnecting
        clientFactory.whenStopped.addBoth(lambda _: reactor.stop())
        reactor.callWhenRunning(lambda: reactor.connectTCP(self.interface,
                                                           portNumber,
                                                           clientFactory))
        self.runReactor(reactor)
        if fatalErrors:
            self.fail(fatalErrors[0].getTraceback())
        clientFactory.reason.trap(UserError)
        self.assertEqual(clientFactory.failed, 1)
    def test_reconnect(self):
        """
        Calling C{connector.connect()} from C{clientConnectionLost} starts a
        new connection attempt on the same connector.
        """
        serverFactory = ClosingFactory()
        reactor = self.buildReactor()
        tcpPort = reactor.listenTCP(0, serverFactory, interface=self.interface)
        serverFactory.port = tcpPort
        portNumber = tcpPort.getHost().port
        clientFactory = MyClientFactory()
        def clientConnectionLost(connector, reason):
            # Reconnect; the server has shut down by then, so this attempt
            # is expected to fail with ConnectionRefusedError.
            connector.connect()
        clientFactory.clientConnectionLost = clientConnectionLost
        reactor.connectTCP(self.interface, portNumber, clientFactory)
        protocolMadeAndClosed = []
        def reconnectFailed(ignored):
            p = clientFactory.protocol
            protocolMadeAndClosed.append((p.made, p.closed))
            reactor.stop()
        clientFactory.failDeferred.addCallback(reconnectFailed)
        self.runReactor(reactor)
        clientFactory.reason.trap(ConnectionRefusedError)
        self.assertEqual(protocolMadeAndClosed, [(1, 1)])
class TCP4ConnectorTestsBuilder(TCPConnectorTestsBuilder):
    """
    IPv4 variant of the connector tests.
    """
    interface = '127.0.0.1'
    family = socket.AF_INET
    addressClass = IPv4Address
class TCP6ConnectorTestsBuilder(TCPConnectorTestsBuilder):
    """
    IPv6 variant of the connector tests, bound to a link-local address.
    """
    family = socket.AF_INET6
    addressClass = IPv6Address
    if ipv6Skip:
        skip = ipv6Skip
    def setUp(self):
        # Resolved at test time since it may raise SkipTest.
        self.interface = getLinkLocalIPv6Address()
def createTestSocket(test, addressFamily, socketType):
    """
    Create a socket of the given family and type, arranging for the given
    test to close it during cleanup.
    """
    sock = socket.socket(addressFamily, socketType)
    test.addCleanup(sock.close)
    return sock
class StreamTransportTestsMixin(LogObserverMixin):
    """
    Mixin defining tests that apply to any stream transport port; relies on
    a companion mixin supplying C{getListeningPort}.
    """
    def test_startedListeningLogMessage(self):
        """
        When a port starts listening, a message including the factory's log
        prefix is logged.
        """
        loggedMessages = self.observe()
        reactor = self.buildReactor()
        @implementer(ILoggingContext)
        class SomeFactory(ServerFactory):
            def logPrefix(self):
                return "Crazy Factory"
        factory = SomeFactory()
        p = self.getListeningPort(reactor, factory)
        expectedMessage = self.getExpectedStartListeningLogMessage(
            p, "Crazy Factory")
        self.assertEqual((expectedMessage,), loggedMessages[0]['message'])
    def test_connectionLostLogMsg(self):
        """
        When a port stops listening, the expected "closed" message is logged.
        """
        loggedMessages = []
        def logConnectionLostMsg(eventDict):
            loggedMessages.append(log.textFromEventDict(eventDict))
        reactor = self.buildReactor()
        p = self.getListeningPort(reactor, ServerFactory())
        expectedMessage = self.getExpectedConnectionLostLogMsg(p)
        log.addObserver(logConnectionLostMsg)
        def stopReactor(ignored):
            log.removeObserver(logConnectionLostMsg)
            reactor.stop()
        def doStopListening():
            log.addObserver(logConnectionLostMsg)
            maybeDeferred(p.stopListening).addCallback(stopReactor)
        reactor.callWhenRunning(doStopListening)
        reactor.run()
        self.assertIn(expectedMessage, loggedMessages)
    def test_allNewStyle(self):
        """
        The listening port object is built entirely from new-style classes.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, ServerFactory())
        self.assertFullyNewStyle(port)
class ListenTCPMixin(object):
    """
    Provide C{getListeningPort} implemented with L{IReactorTCP.listenTCP}.
    """
    def getListeningPort(self, reactor, factory, port=0, interface=''):
        # Ephemeral port by default.
        return reactor.listenTCP(port, factory, interface=interface)
class SocketTCPMixin(object):
    """
    Provide C{getListeningPort} implemented with
    L{IReactorSocket.adoptStreamPort}, wrapping an already-listening socket.
    """
    def getListeningPort(self, reactor, factory, port=0, interface=''):
        if IReactorSocket.providedBy(reactor):
            # A ':' marks an IPv6 literal, which must go through getaddrinfo.
            if ':' in interface:
                domain = socket.AF_INET6
                address = socket.getaddrinfo(interface, port)[0][4]
            else:
                domain = socket.AF_INET
                address = (interface, port)
            portSock = socket.socket(domain)
            portSock.bind(address)
            portSock.listen(3)
            portSock.setblocking(False)
            try:
                return reactor.adoptStreamPort(
                    portSock.fileno(), portSock.family, factory)
            finally:
                # The socket should still be open; fileno will raise if it is
                # not.
                portSock.fileno()
                # Now clean it up, because the rest of the test does not need
                # it.
                portSock.close()
        else:
            raise SkipTest("Reactor does not provide IReactorSocket")
class TCPPortTestsMixin(object):
    """
    Tests for TCP listening ports; relies on a companion mixin supplying
    C{getListeningPort}.  (The class continues beyond this chunk.)
    """
    requiredInterfaces = (IReactorTCP,)
    def getExpectedStartListeningLogMessage(self, port, factory):
        """
        Return the message expected to be logged when the port starts.
        """
        return "%s starting on %d" % (
            factory, port.getHost().port)
    def getExpectedConnectionLostLogMsg(self, port):
        """
        Return the message expected to be logged when the port is closed.
        """
        return "(TCP Port %s Closed)" % (port.getHost().port,)
    def test_portGetHostOnIPv4(self):
        """
        C{getHost} on an IPv4 port returns an L{IPv4Address}.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, ServerFactory())
        address = port.getHost()
        self.assertIsInstance(address, IPv4Address)
    def test_portGetHostOnIPv6(self):
        """
        C{getHost} on an IPv6 port returns an L{IPv6Address} carrying the
        bound host and port number.
        """
        reactor = self.buildReactor()
        host, portNumber = findFreePort(
            family=socket.AF_INET6, interface='::1')[:2]
        port = self.getListeningPort(
            reactor, ServerFactory(), portNumber, host)
        address = port.getHost()
        self.assertIsInstance(address, IPv6Address)
        self.assertEqual('::1', address.host)
        self.assertEqual(portNumber, address.port)
    if ipv6Skip:
        test_portGetHostOnIPv6.skip = ipv6Skip
    def test_portGetHostOnIPv6ScopeID(self):
        """
        C{getHost} on a port bound to a scoped (link-local) IPv6 address
        reports that address.
        """
        linkLocal = getLinkLocalIPv6Address()
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, ServerFactory(), 0, linkLocal)
        address = port.getHost()
        self.assertIsInstance(address, IPv6Address)
        self.assertEqual(linkLocal, address.host)
    if ipv6Skip:
        test_portGetHostOnIPv6ScopeID.skip = ipv6Skip
    def _buildProtocolAddressTest(self, client, interface):
        """
        Connect the given (non-blocking) client socket to a listening port on
        C{interface} and return the address passed to C{buildProtocol}.
        """
        class ObserveAddress(ServerFactory):
            def buildProtocol(self, address):
                reactor.stop()
                self.observedAddress = address
                return Protocol()
        factory = ObserveAddress()
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, factory, 0, interface)
        client.setblocking(False)
        try:
            connect(client, (port.getHost().host, port.getHost().port))
        except socket.error as e:
            # A non-blocking connect is allowed to be "in progress".
            errnum, message = e.args
            self.assertIn(errnum, (errno.EINPROGRESS, errno.EWOULDBLOCK))
        self.runReactor(reactor)
        return factory.observedAddress
    def test_buildProtocolIPv4Address(self):
        """
        The address handed to C{buildProtocol} matches the IPv4 client's
        socket name.
        """
        interface = '127.0.0.1'
        client = createTestSocket(self, socket.AF_INET, socket.SOCK_STREAM)
        observedAddress = self._buildProtocolAddressTest(client, interface)
        self.assertEqual(
            IPv4Address('TCP', *client.getsockname()), observedAddress)
    def test_buildProtocolIPv6Address(self):
        """
        The address handed to C{buildProtocol} matches the IPv6 client's
        socket name.
        """
        interface = '::1'
        client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
        observedAddress = self._buildProtocolAddressTest(client, interface)
        self.assertEqual(
            IPv6Address('TCP', *client.getsockname()[:2]), observedAddress)
    if ipv6Skip:
        test_buildProtocolIPv6Address.skip = ipv6Skip
    def test_buildProtocolIPv6AddressScopeID(self):
        """
        The address handed to C{buildProtocol} matches the scoped IPv6
        client's socket name.
        """
        interface = getLinkLocalIPv6Address()
        client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
        observedAddress = self._buildProtocolAddressTest(client, interface)
        self.assertEqual(
            IPv6Address('TCP', *client.getsockname()[:2]), observedAddress)
    if ipv6Skip:
        test_buildProtocolIPv6AddressScopeID.skip = ipv6Skip
def _serverGetConnectionAddressTest(self, client, interface, which):
class ObserveAddress(Protocol):
def makeConnection(self, transport):
reactor.stop()
self.factory.address = getattr(transport, which)()
reactor = self.buildReactor()
factory = ServerFactory()
factory.protocol = ObserveAddress
port = self.getListeningPort(reactor, factory, 0, interface)
client.setblocking(False)
try:
connect(client, (port.getHost().host, port.getHost().port))
except socket.error as e:
errnum, message = e.args
self.assertIn(errnum, (errno.EINPROGRESS, errno.EWOULDBLOCK))
self.runReactor(reactor)
return factory.address
def test_serverGetHostOnIPv4(self):
interface = '127.0.0.1'
client = createTestSocket(self, socket.AF_INET, socket.SOCK_STREAM)
hostAddress = self._serverGetConnectionAddressTest(
client, interface, 'getHost')
self.assertEqual(
IPv4Address('TCP', *client.getpeername()), hostAddress)
def test_serverGetHostOnIPv6(self):
interface = '::1'
client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
hostAddress = self._serverGetConnectionAddressTest(
client, interface, 'getHost')
self.assertEqual(
IPv6Address('TCP', *client.getpeername()[:2]), hostAddress)
if ipv6Skip:
test_serverGetHostOnIPv6.skip = ipv6Skip
def test_serverGetHostOnIPv6ScopeID(self):
interface = getLinkLocalIPv6Address()
client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
hostAddress = self._serverGetConnectionAddressTest(
client, interface, 'getHost')
self.assertEqual(
IPv6Address('TCP', *client.getpeername()[:2]), hostAddress)
if ipv6Skip:
test_serverGetHostOnIPv6ScopeID.skip = ipv6Skip
def test_serverGetPeerOnIPv4(self):
interface = '127.0.0.1'
client = createTestSocket(self, socket.AF_INET, socket.SOCK_STREAM)
peerAddress = self._serverGetConnectionAddressTest(
client, interface, 'getPeer')
self.assertEqual(
IPv4Address('TCP', *client.getsockname()), peerAddress)
def test_serverGetPeerOnIPv6(self):
interface = '::1'
client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
peerAddress = self._serverGetConnectionAddressTest(
client, interface, 'getPeer')
self.assertEqual(
IPv6Address('TCP', *client.getsockname()[:2]), peerAddress)
if ipv6Skip:
test_serverGetPeerOnIPv6.skip = ipv6Skip
def test_serverGetPeerOnIPv6ScopeID(self):
interface = getLinkLocalIPv6Address()
client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
peerAddress = self._serverGetConnectionAddressTest(
client, interface, 'getPeer')
self.assertEqual(
IPv6Address('TCP', *client.getsockname()[:2]), peerAddress)
if ipv6Skip:
test_serverGetPeerOnIPv6ScopeID.skip = ipv6Skip
class TCPPortTestsBuilder(ReactorBuilder, ListenTCPMixin, TCPPortTestsMixin,
                          ObjectModelIntegrationMixin,
                          StreamTransportTestsMixin):
    """Run the TCP port tests against ports created with C{listenTCP}."""
    pass
class TCPFDPortTestsBuilder(ReactorBuilder, SocketTCPMixin, TCPPortTestsMixin,
                            ObjectModelIntegrationMixin,
                            StreamTransportTestsMixin):
    """Run the TCP port tests against ports adopted via C{adoptStreamPort}."""
    pass
class StopStartReadingProtocol(Protocol):
    """
    Repeatedly pause and resume its transport, then fire the factory's
    C{ready} Deferred; fires C{stop} once 4*4096 bytes have arrived.
    """
    def connectionMade(self):
        # Accumulate received bytes here until the expected amount arrives.
        self.data = b''
        self.pauseResumeProducing(3)
    def pauseResumeProducing(self, counter):
        """Pause and immediately resume, scheduling itself C{counter} times."""
        self.transport.pauseProducing()
        self.transport.resumeProducing()
        if counter:
            self.factory.reactor.callLater(0,
                    self.pauseResumeProducing, counter - 1)
        else:
            # Done cycling; signal the test that writing may begin.
            self.factory.reactor.callLater(0,
                    self.factory.ready.callback, self)
    def dataReceived(self, data):
        log.msg('got data', len(data))
        self.data += data
        if len(self.data) == 4*4096:
            # Exactly the amount the test writes; hand the payload back.
            self.factory.stop.callback(self.data)
class TCPConnectionTestsBuilder(ReactorBuilder):
    """
    Tests for the behaviour of established TCP connections: producer
    pause/resume interactions and half-close handling.
    """
    requiredInterfaces = (IReactorTCP,)
    def test_stopStartReading(self):
        """
        Data written while the receiver cycles stopReading/startReading
        still arrives intact and in order.
        """
        sf = ServerFactory()
        reactor = sf.reactor = self.buildReactor()
        skippedReactors = ["Glib2Reactor", "Gtk2Reactor"]
        reactorClassName = reactor.__class__.__name__
        if reactorClassName in skippedReactors and platform.isWindows():
            raise SkipTest(
                "This test is broken on gtk/glib under Windows.")
        sf.protocol = StopStartReadingProtocol
        sf.ready = Deferred()
        sf.stop = Deferred()
        p = reactor.listenTCP(0, sf)
        port = p.getHost().port
        def proceed(protos, port):
            """Both sides connected and ready; write the test payload."""
            self.assertTrue(protos[0])
            self.assertTrue(protos[1])
            protos = protos[0][1], protos[1][1]
            protos[0].transport.write(b'x' * (2 * 4096) + b'y' * (2 * 4096))
            return (sf.stop.addCallback(cleanup, protos, port)
                    .addCallback(lambda ign: reactor.stop()))
        def cleanup(data, protos, port):
            """Verify the payload arrived unchanged, then tear down."""
            self.assertEqual(data, b'x'*(2*4096) + b'y'*(2*4096),
                             'did not get the right data')
            return DeferredList([
                    maybeDeferred(protos[0].transport.loseConnection),
                    maybeDeferred(protos[1].transport.loseConnection),
                    maybeDeferred(port.stopListening)])
        cc = TCP4ClientEndpoint(reactor, '127.0.0.1', port)
        cf = ClientFactory()
        cf.protocol = Protocol
        d = DeferredList([cc.connect(cf), sf.ready]).addCallback(proceed, p)
        d.addErrback(log.err)
        self.runReactor(reactor)
    def test_connectionLostAfterPausedTransport(self):
        """
        A connectionLost notification that arrives while the transport
        is paused is delivered only after the transport is resumed.
        """
        class Pauser(ConnectableProtocol):
            def __init__(self):
                self.events = []
            def dataReceived(self, bytes):
                self.events.append("paused")
                self.transport.pauseProducing()
                self.reactor.callLater(0, self.resume)
            def resume(self):
                self.events.append("resumed")
                self.transport.resumeProducing()
            def connectionLost(self, reason):
                # This is the event you have been waiting for.
                self.events.append("lost")
                ConnectableProtocol.connectionLost(self, reason)
        class Client(ConnectableProtocol):
            def connectionMade(self):
                self.transport.write(b"some bytes for you")
                self.transport.loseConnection()
        pauser = Pauser()
        runProtocolsWithReactor(self, pauser, Client(), TCPCreator())
        # "lost" must come last: delivery was deferred past the pause.
        self.assertEqual(pauser.events, ["paused", "resumed", "lost"])
    def test_doubleHalfClose(self):
        """
        Closing the write side from writeConnectionLost after already
        losing the read side does not hang or blow up.
        """
        @implementer(IHalfCloseableProtocol)
        class ListenerProtocol(ConnectableProtocol):
            def readConnectionLost(self):
                self.transport.loseWriteConnection()
            def writeConnectionLost(self):
                self.transport.loseConnection()
        class Client(ConnectableProtocol):
            def connectionMade(self):
                self.transport.loseConnection()
        # If test fails, reactor won't stop and we'll hit timeout:
        runProtocolsWithReactor(
            self, ListenerProtocol(), Client(), TCPCreator())
class WriteSequenceTestsMixin(object):
    """
    Tests for C{writeSequence} and for producer registration behaviour
    on stream transports.
    """
    requiredInterfaces = (IReactorTCP,)
    def setWriteBufferSize(self, transport, value):
        """
        Shrink the transport's send buffer so small writes overflow it.
        Different transports expose the knob under different names.
        """
        if getattr(transport, "writeBufferSize", None) is not None:
            transport.writeBufferSize = value
        else:
            transport.bufferSize = value
    # NOTE: the misspelling "Sequece" is the test's public name (used by
    # trial's name-based selection); it is preserved deliberately.
    def test_writeSequeceWithoutWrite(self):
        """
        C{writeSequence} alone (no prior C{write}) delivers the pieces
        concatenated in order.
        """
        def connected(protocols):
            client, server, port = protocols
            def dataReceived(data):
                log.msg("data received: %r" % data)
                self.assertEqual(data, b"Some sequence splitted")
                client.transport.loseConnection()
            server.dataReceived = dataReceived
            client.transport.writeSequence([b"Some ", b"sequence ", b"splitted"])
        reactor = self.buildReactor()
        d = self.getConnectedClientAndServer(reactor, "127.0.0.1",
                                             socket.AF_INET)
        d.addCallback(connected)
        d.addErrback(log.err)
        self.runReactor(reactor)
    def test_writeSequenceWithUnicodeRaisesException(self):
        """
        Passing unicode to C{writeSequence} raises L{TypeError}; only
        bytes may be written to a stream transport.
        """
        def connected(protocols):
            client, server, port = protocols
            exc = self.assertRaises(
                TypeError,
                server.transport.writeSequence, [u"Unicode is not kosher"])
            self.assertEqual(str(exc), "Data must not be unicode")
            server.transport.loseConnection()
        reactor = self.buildReactor()
        d = self.getConnectedClientAndServer(reactor, "127.0.0.1",
                                             socket.AF_INET)
        d.addCallback(connected)
        d.addErrback(log.err)
        self.runReactor(reactor)
    def test_streamingProducer(self):
        """
        A streaming (push) producer is paused once C{writeSequence}
        overflows the send buffer, and resumed once it drains.
        """
        @implementer(IPushProducer)
        class SaveActionProducer(object):
            client = None
            server = None
            def __init__(self):
                self.actions = []
            def pauseProducing(self):
                self.actions.append("pause")
            def resumeProducing(self):
                self.actions.append("resume")
                # Unregister the producer so the connection can close
                self.client.transport.unregisterProducer()
                # This is why the code below waits for the server connection
                # first - so we have it to close here. We close the server
                # side because win32evenreactor cannot reliably observe us
                # closing the client side (#5285).
                self.server.transport.loseConnection()
            def stopProducing(self):
                self.actions.append("stop")
        producer = SaveActionProducer()
        def connected(protocols):
            client, server = protocols[:2]
            producer.client = client
            producer.server = server
            # Register a streaming producer and verify that it gets paused
            # after it writes more than the local send buffer can hold.
            client.transport.registerProducer(producer, True)
            self.assertEqual(producer.actions, [])
            self.setWriteBufferSize(client.transport, 500)
            client.transport.writeSequence([b"x" * 50] * 20)
            self.assertEqual(producer.actions, ["pause"])
        reactor = self.buildReactor()
        d = self.getConnectedClientAndServer(reactor, "127.0.0.1",
                                             socket.AF_INET)
        d.addCallback(connected)
        d.addErrback(log.err)
        self.runReactor(reactor)
        # After the send buffer gets a chance to empty out a bit, the producer
        # should be resumed.
        self.assertEqual(producer.actions, ["pause", "resume"])
    def test_nonStreamingProducer(self):
        """
        A non-streaming (pull) producer is resumed immediately upon
        registration and again each time the buffer drains.
        """
        test = self
        @implementer(IPullProducer)
        class SaveActionProducer(object):
            client = None
            def __init__(self):
                self.actions = []
            def resumeProducing(self):
                self.actions.append("resume")
                if self.actions.count("resume") == 2:
                    self.client.transport.stopConsuming()
                else:
                    test.setWriteBufferSize(self.client.transport, 500)
                    self.client.transport.writeSequence([b"x" * 50] * 20)
            def stopProducing(self):
                self.actions.append("stop")
        producer = SaveActionProducer()
        def connected(protocols):
            client = protocols[0]
            producer.client = client
            # Register a non-streaming producer and verify that it is resumed
            # immediately.
            client.transport.registerProducer(producer, False)
            self.assertEqual(producer.actions, ["resume"])
        reactor = self.buildReactor()
        d = self.getConnectedClientAndServer(reactor, "127.0.0.1",
                                             socket.AF_INET)
        d.addCallback(connected)
        d.addErrback(log.err)
        self.runReactor(reactor)
        # After the local send buffer empties out, the producer should be
        # resumed again.
        self.assertEqual(producer.actions, ["resume", "resume"])
class TCPTransportServerAddressTestMixin(object):
    """
    Tests verifying the server transport's string representation, log
    string and recorded peer address for IPv4 and IPv6 connections.
    """
    def getConnectedClientAndServer(self, reactor, interface, addressFamily):
        """Subclasses provide the connection setup; see the builders below."""
        raise NotImplementedError()
    # NOTE: "adressClass" (sic) is an internal parameter name; kept as-is.
    def _testServerAddress(self, interface, addressFamily, adressClass):
        """
        Connect over C{interface} and assert the server transport's
        repr, logstr and recorded peer address are all consistent.
        """
        def connected(protocols):
            client, server, port = protocols
            try:
                self.assertEqual(
                    "<AccumulatingProtocol #%s on %s>" %
                        (server.transport.sessionno, port.getHost().port),
                    str(server.transport))
                self.assertEqual(
                    "AccumulatingProtocol,%s,%s" %
                        (server.transport.sessionno, interface),
                    server.transport.logstr)
                [peerAddress] = server.factory.peerAddresses
                self.assertIsInstance(peerAddress, adressClass)
                self.assertEqual('TCP', peerAddress.type)
                self.assertEqual(interface, peerAddress.host)
            finally:
                # Be certain to drop the connection so the test completes.
                server.transport.loseConnection()
        reactor = self.buildReactor()
        d = self.getConnectedClientAndServer(reactor, interface, addressFamily)
        d.addCallback(connected)
        d.addErrback(log.err)
        self.runReactor(reactor)
    def test_serverAddressTCP4(self):
        """Address bookkeeping is correct for an IPv4 connection."""
        return self._testServerAddress("127.0.0.1", socket.AF_INET,
                                       IPv4Address)
    def test_serverAddressTCP6(self):
        """Address bookkeeping is correct for a link-local IPv6 connection."""
        return self._testServerAddress(getLinkLocalIPv6Address(),
                                       socket.AF_INET6, IPv6Address)
    if ipv6Skip:
        test_serverAddressTCP6.skip = ipv6Skip
class TCPTransportTestsBuilder(TCPTransportServerAddressTestMixin,
                               WriteSequenceTestsMixin, ReactorBuilder):
    """
    Run the transport-address and writeSequence tests over an ordinary
    listenTCP/connectTCP connection pair.
    """
    def getConnectedClientAndServer(self, reactor, interface, addressFamily):
        """
        Establish a client/server pair; return a Deferred firing with
        (client protocol, server protocol, listening port).  The reactor
        is stopped once both sides have disconnected.
        """
        server = MyServerFactory()
        server.protocolConnectionMade = Deferred()
        server.protocolConnectionLost = Deferred()
        client = MyClientFactory()
        client.protocolConnectionMade = Deferred()
        client.protocolConnectionLost = Deferred()
        port = reactor.listenTCP(0, server, interface=interface)
        lostDeferred = gatherResults([client.protocolConnectionLost,
                                      server.protocolConnectionLost])
        def stop(result):
            reactor.stop()
            return result
        lostDeferred.addBoth(stop)
        startDeferred = gatherResults([client.protocolConnectionMade,
                                       server.protocolConnectionMade])
        deferred = Deferred()
        def start(protocols):
            client, server = protocols
            log.msg("client connected %s" % client)
            log.msg("server connected %s" % server)
            deferred.callback((client, server, port))
        startDeferred.addCallback(start)
        reactor.connectTCP(interface, port.getHost().port, client)
        return deferred
class AdoptStreamConnectionTestsBuilder(TCPTransportServerAddressTestMixin,
                                        WriteSequenceTestsMixin,
                                        ReactorBuilder):
    """
    Run the transport-address and writeSequence tests over a server
    connection re-adopted via C{IReactorSocket.adoptStreamConnection}.
    """
    requiredInterfaces = (IReactorFDSet, IReactorSocket)
    def getConnectedClientAndServer(self, reactor, interface, addressFamily):
        """
        Accept the connection with a throwaway factory, detach it from
        the reactor, then adopt its file descriptor into a second
        factory; return a Deferred firing with (client, server, port).
        """
        firstServer = MyServerFactory()
        firstServer.protocolConnectionMade = Deferred()
        server = MyServerFactory()
        server.protocolConnectionMade = Deferred()
        server.protocolConnectionLost = Deferred()
        client = MyClientFactory()
        client.protocolConnectionMade = Deferred()
        client.protocolConnectionLost = Deferred()
        port = reactor.listenTCP(0, firstServer, interface=interface)
        # NOTE: "firtServerConnected" (sic) is a local helper; the typo
        # is harmless but preserved here.
        def firtServerConnected(proto):
            # Detach the accepted connection from the reactor before
            # re-adopting its fd under the real server factory.
            reactor.removeReader(proto.transport)
            reactor.removeWriter(proto.transport)
            reactor.adoptStreamConnection(
                proto.transport.fileno(), addressFamily, server)
        firstServer.protocolConnectionMade.addCallback(firtServerConnected)
        lostDeferred = gatherResults([client.protocolConnectionLost,
                                      server.protocolConnectionLost])
        def stop(result):
            if reactor.running:
                reactor.stop()
            return result
        lostDeferred.addBoth(stop)
        deferred = Deferred()
        deferred.addErrback(stop)
        startDeferred = gatherResults([client.protocolConnectionMade,
                                       server.protocolConnectionMade])
        def start(protocols):
            client, server = protocols
            log.msg("client connected %s" % client)
            log.msg("server connected %s" % server)
            deferred.callback((client, server, port))
        startDeferred.addCallback(start)
        reactor.connectTCP(interface, port.getHost().port, client)
        return deferred
# Generate one concrete TestCase per installed reactor for each builder
# and inject them into this module's namespace so trial discovers them.
globals().update(TCP4ClientTestsBuilder.makeTestCaseClasses())
globals().update(TCP6ClientTestsBuilder.makeTestCaseClasses())
globals().update(TCPPortTestsBuilder.makeTestCaseClasses())
globals().update(TCPFDPortTestsBuilder.makeTestCaseClasses())
globals().update(TCPConnectionTestsBuilder.makeTestCaseClasses())
globals().update(TCP4ConnectorTestsBuilder.makeTestCaseClasses())
globals().update(TCP6ConnectorTestsBuilder.makeTestCaseClasses())
globals().update(TCPTransportTestsBuilder.makeTestCaseClasses())
globals().update(AdoptStreamConnectionTestsBuilder.makeTestCaseClasses())
class ServerAbortsTwice(ConnectableProtocol):
    """Call abortConnection() twice on receipt of data."""
    def dataReceived(self, data):
        self.transport.abortConnection()
        self.transport.abortConnection()
class ServerAbortsThenLoses(ConnectableProtocol):
    """Call abortConnection() and then loseConnection() on receipt of data."""
    def dataReceived(self, data):
        self.transport.abortConnection()
        self.transport.loseConnection()
class AbortServerWritingProtocol(ConnectableProtocol):
    """Immediately write b"ready" so the client has something to react to."""
    def connectionMade(self):
        self.transport.write(b"ready")
class ReadAbortServerProtocol(AbortServerWritingProtocol):
    """
    Fail if anything other than filler b"X" bytes arrives; data written
    after the peer aborted should never be delivered.
    """
    def dataReceived(self, data):
        if data.replace(b'X', b''):
            raise Exception("Unexpectedly received data.")
class NoReadServer(ConnectableProtocol):
    """Stop reading immediately so the peer's send buffer fills up."""
    def connectionMade(self):
        self.transport.stopReading()
class EventualNoReadServer(ConnectableProtocol):
    """
    Exchange one round of bytes, then stop reading once our own write
    has been flushed (signalled via the pull-producer resumeProducing).
    """
    # Set once the first dataReceived fires.
    gotData = False
    # Set once stopReading has been issued; later resumes are no-ops.
    stoppedReading = False
    def dataReceived(self, data):
        if not self.gotData:
            self.gotData = True
            # Register as a pull producer so resumeProducing tells us
            # when the write below has drained.
            self.transport.registerProducer(self, False)
            self.transport.write(b"hello")
    def resumeProducing(self):
        if self.stoppedReading:
            return
        self.stoppedReading = True
        # We've written out the data:
        self.transport.stopReading()
    def pauseProducing(self):
        pass
    def stopProducing(self):
        pass
class BaseAbortingClient(ConnectableProtocol):
    """
    Base for aborting clients; asserts connectionLost is never invoked
    re-entrantly from inside a reactor callback.
    """
    # True only while executing inside a reactor-invoked method.
    inReactorMethod = False
    def connectionLost(self, reason):
        if self.inReactorMethod:
            raise RuntimeError("BUG: connectionLost was called re-entrantly!")
        ConnectableProtocol.connectionLost(self, reason)
class WritingButNotAbortingClient(BaseAbortingClient):
    """Write some bytes but never abort; the server side does the aborting."""
    def connectionMade(self):
        self.transport.write(b"hello")
class AbortingClient(BaseAbortingClient):
    """
    On first data, write, abort the connection, then write again; the
    bytes written after abortConnection must be discarded.
    """
    def dataReceived(self, data):
        self.inReactorMethod = True
        self.writeAndAbort()
        self.inReactorMethod = False
    def writeAndAbort(self):
        # X's before the abort may be delivered; Y's afterwards must not be.
        self.transport.write(b"X" * 10000)
        self.transport.abortConnection()
        self.transport.write(b"Y" * 10000)
class AbortingTwiceClient(AbortingClient):
    """Abort a second time after the first abort; must be harmless."""
    def writeAndAbort(self):
        AbortingClient.writeAndAbort(self)
        self.transport.abortConnection()
class AbortingThenLosingClient(AbortingClient):
    """Call loseConnection after abortConnection; must be harmless."""
    def writeAndAbort(self):
        AbortingClient.writeAndAbort(self)
        self.transport.loseConnection()
class ProducerAbortingClient(ConnectableProtocol):
    """
    Call abortConnection from inside a pull producer's resumeProducing,
    and assert that stopProducing was called and connectionLost was not
    re-entrant.
    """
    # NOTE(review): defaults to True (unlike BaseAbortingClient) —
    # presumably so a connectionLost arriving before any resumeProducing
    # call is also flagged as a bug; confirm against upstream history.
    inReactorMethod = True
    producerStopped = False
    def write(self):
        # Large write so the transport keeps pulling from the producer.
        self.transport.write(b"lalala" * 127000)
        self.inRegisterProducer = True
        self.transport.registerProducer(self, False)
        self.inRegisterProducer = False
    def connectionMade(self):
        self.write()
    def resumeProducing(self):
        self.inReactorMethod = True
        # Skip the synchronous resume triggered by registerProducer
        # itself; abort only on a genuine reactor-driven resume.
        if not self.inRegisterProducer:
            self.transport.abortConnection()
        self.inReactorMethod = False
    def stopProducing(self):
        self.producerStopped = True
    def connectionLost(self, reason):
        if not self.producerStopped:
            raise RuntimeError("BUG: stopProducing() was never called.")
        if self.inReactorMethod:
            raise RuntimeError("BUG: connectionLost called re-entrantly!")
        ConnectableProtocol.connectionLost(self, reason)
class StreamingProducerClient(ConnectableProtocol):
    """
    Register a push producer, flood the transport until it is paused,
    then abort the connection from outside any reactor method.
    """
    paused = False
    extraWrites = 0
    inReactorMethod = False
    def connectionMade(self):
        self.write()
    def write(self):
        self.transport.registerProducer(self, True)
        # ~32MB total: guaranteed to overflow the send buffer and pause us.
        for i in range(100):
            self.transport.write(b"1234567890" * 32000)
    def resumeProducing(self):
        self.paused = False
    def stopProducing(self):
        pass
    def pauseProducing(self):
        if self.paused:
            return
        self.paused = True
        # Abort a moment later, from a timer rather than re-entrantly.
        self.reactor.callLater(0.01, self.doAbort)
    def doAbort(self):
        if not self.paused:
            log.err(RuntimeError("BUG: We should be paused a this point."))
        self.inReactorMethod = True
        self.transport.abortConnection()
        self.inReactorMethod = False
    def connectionLost(self, reason):
        # Let the (previously non-reading) peer observe the close too.
        self.otherProtocol.transport.startReading()
        ConnectableProtocol.connectionLost(self, reason)
class StreamingProducerClientLater(StreamingProducerClient):
    """
    Same as StreamingProducerClient, but only start flooding after one
    round-trip of bytes has been exchanged.
    """
    def connectionMade(self):
        self.transport.write(b"hello")
        self.gotData = False
    def dataReceived(self, data):
        if not self.gotData:
            self.gotData = True
            self.write()
class ProducerAbortingClientLater(ProducerAbortingClient):
    """
    Same as ProducerAbortingClient, but defer the big write until the
    server has sent us something.
    """
    def connectionMade(self):
        pass
    def dataReceived(self, data):
        self.write()
class DataReceivedRaisingClient(AbortingClient):
    """Abort and then raise from dataReceived, exercising error handling."""
    def dataReceived(self, data):
        self.transport.abortConnection()
        raise ZeroDivisionError("ONO")
class ResumeThrowsClient(ProducerAbortingClient):
    """Abort and then raise from resumeProducing."""
    def resumeProducing(self):
        if not self.inRegisterProducer:
            self.transport.abortConnection()
            raise ZeroDivisionError("ono!")
    def connectionLost(self, reason):
        # if the we blew up in resumeProducing, consumers are justified in
        # giving up on the producer and not calling stopProducing.
        ConnectableProtocol.connectionLost(self, reason)
class AbortConnectionMixin(object):
    """
    Drive pairs of the aborting protocols above against each other and
    verify both sides disconnect with an acceptable reason and the
    reactor is left clean.
    """
    # Override in subclasses, should be a EndpointCreator instance:
    endpoints = None
    def runAbortTest(self, clientClass, serverClass,
                     clientConnectionLostReason=None):
        """
        Run C{clientClass} against C{serverClass}; optionally require an
        extra acceptable disconnect reason for the client side.
        """
        clientExpectedExceptions = (ConnectionAborted, ConnectionLost)
        serverExpectedExceptions = (ConnectionLost, ConnectionDone)
        # In TLS tests we may get SSL.Error instead of ConnectionLost,
        # since we're trashing the TLS protocol layer.
        if useSSL:
            clientExpectedExceptions = clientExpectedExceptions + (SSL.Error,)
            serverExpectedExceptions = serverExpectedExceptions + (SSL.Error,)
        client = clientClass()
        server = serverClass()
        client.otherProtocol = server
        server.otherProtocol = client
        reactor = runProtocolsWithReactor(self, server, client, self.endpoints)
        # No stray readers/writers may survive the run.
        self.assertEqual(reactor.removeAll(), [])
        delayedCalls = reactor.getDelayedCalls()
        # Exactly one leftover delayed call is tolerated here.
        self.assertEqual(len(delayedCalls), 1, map(str, delayedCalls))
        if clientConnectionLostReason is not None:
            self.assertIsInstance(
                client.disconnectReason.value,
                (clientConnectionLostReason,) + clientExpectedExceptions)
        else:
            self.assertIsInstance(client.disconnectReason.value,
                                  clientExpectedExceptions)
        self.assertIsInstance(server.disconnectReason.value, serverExpectedExceptions)
    def test_dataReceivedAbort(self):
        """abortConnection() from dataReceived works."""
        return self.runAbortTest(AbortingClient, ReadAbortServerProtocol)
    def test_clientAbortsConnectionTwice(self):
        """A second client-side abortConnection() is a no-op."""
        return self.runAbortTest(AbortingTwiceClient, ReadAbortServerProtocol)
    def test_clientAbortsConnectionThenLosesConnection(self):
        """loseConnection() after a client-side abort is a no-op."""
        return self.runAbortTest(AbortingThenLosingClient,
                                 ReadAbortServerProtocol)
    def test_serverAbortsConnectionTwice(self):
        """A second server-side abortConnection() is a no-op."""
        return self.runAbortTest(WritingButNotAbortingClient, ServerAbortsTwice,
                                 clientConnectionLostReason=ConnectionLost)
    def test_serverAbortsConnectionThenLosesConnection(self):
        """loseConnection() after a server-side abort is a no-op."""
        return self.runAbortTest(WritingButNotAbortingClient,
                                 ServerAbortsThenLoses,
                                 clientConnectionLostReason=ConnectionLost)
    def test_resumeProducingAbort(self):
        """abortConnection() from a pull producer's resumeProducing works."""
        self.runAbortTest(ProducerAbortingClient,
                          ConnectableProtocol)
    def test_resumeProducingAbortLater(self):
        """Same as above, but the producer write is started by incoming data."""
        return self.runAbortTest(ProducerAbortingClientLater,
                                 AbortServerWritingProtocol)
    def test_fullWriteBuffer(self):
        """abortConnection() while paused with a full write buffer works."""
        self.runAbortTest(StreamingProducerClient,
                          NoReadServer)
    def test_fullWriteBufferAfterByteExchange(self):
        """Same as above, after an initial byte exchange."""
        return self.runAbortTest(StreamingProducerClientLater,
                                 EventualNoReadServer)
    def test_dataReceivedThrows(self):
        """An exception raised after an abort in dataReceived is logged."""
        self.runAbortTest(DataReceivedRaisingClient,
                          AbortServerWritingProtocol,
                          clientConnectionLostReason=ZeroDivisionError)
        errors = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEqual(len(errors), 1)
    def test_resumeProducingThrows(self):
        """An exception raised after an abort in resumeProducing is logged."""
        self.runAbortTest(ResumeThrowsClient,
                          ConnectableProtocol,
                          clientConnectionLostReason=ZeroDivisionError)
        errors = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEqual(len(errors), 1)
class AbortConnectionTestCase(ReactorBuilder, AbortConnectionMixin):
    """Run the abortConnection tests over plain TCP endpoints."""
    requiredInterfaces = (IReactorTCP,)
    endpoints = TCPCreator()
# Inject one concrete TestCase per installed reactor.
globals().update(AbortConnectionTestCase.makeTestCaseClasses())
class SimpleUtilityTestCase(TestCase):
    """
    Tests for the C{_resolveIPv6} helper: it must accept only numeric
    hosts and services, and return a full 4-tuple sockaddr.
    """
    if ipv6Skip:
        skip = ipv6Skip
    def test_resolveNumericHost(self):
        """A non-numeric host like "localhost" is rejected with EAI_NONAME."""
        err = self.assertRaises(socket.gaierror, _resolveIPv6, "localhost", 1)
        self.assertEqual(err.args[0], socket.EAI_NONAME)
    def test_resolveNumericService(self):
        """A non-numeric service like "http" is rejected with EAI_NONAME."""
        err = self.assertRaises(socket.gaierror, _resolveIPv6, "::1", "http")
        self.assertEqual(err.args[0], socket.EAI_NONAME)
    if platform.isWindows():
        test_resolveNumericService.skip = ("The AI_NUMERICSERV flag is not "
                                           "supported by Microsoft providers.")
    def test_resolveIPv6(self):
        """A numeric host/port resolves to a 4-tuple IPv6 sockaddr."""
        result = _resolveIPv6("::1", 2)
        self.assertEqual(len(result), 4)
        self.assertIsInstance(result[2], int) # flow info
        self.assertIsInstance(result[3], int) # scope id
        # but, luckily, IP presentation format and what it means to be a port
        # number are a little better specified.
        self.assertEqual(result[:2], ("::1", 2))
| true | true |
f73e726ea1457ddee39ca2fa59fd9ea439126892 | 3,849 | py | Python | privatter-dl.py | ColdLunaris/privatter-dl | 6b94cf9c260d8d57009572422c18f07d771e956b | [
"Apache-2.0"
] | null | null | null | privatter-dl.py | ColdLunaris/privatter-dl | 6b94cf9c260d8d57009572422c18f07d771e956b | [
"Apache-2.0"
] | null | null | null | privatter-dl.py | ColdLunaris/privatter-dl | 6b94cf9c260d8d57009572422c18f07d771e956b | [
"Apache-2.0"
] | null | null | null | import requests, argparse, os, asyncio, concurrent.futures
from termcolor import colored
from bs4 import BeautifulSoup
def parse_args():
    """Parse the downloader's command-line options.

    Returns the argparse namespace with attributes: directory, url,
    verbose (bool), username, password and threads.
    """
    def _to_bool(value):
        # Fix: argparse delivers option values as strings, so any "-v"
        # argument (even "-v true") used to make ``verbose`` a str that
        # failed the downstream ``v is True`` identity checks.  Interpret
        # the usual spellings explicitly.
        if isinstance(value, bool):
            return value
        return value.strip().lower() in ("1", "true", "yes", "y", "t")

    p = argparse.ArgumentParser()
    p.add_argument("-d", dest="directory", required=False, help="destination where all images will be saved")
    p.add_argument("-u", dest="url", required=True, help="url for profile to be downloaded")
    p.add_argument("-v", dest="verbose", required=False, default=True, type=_to_bool, help="specifies verbosity to true or false. Default true")
    p.add_argument("-U", dest="username", required=True, help="Username for login to Privatter")
    p.add_argument("-P", dest="password", required=True, help="Password for login to Privatter")
    p.add_argument("-t", dest="threads", required=False, help="Amount of threads to spawn while downloading. Default is 1")
    return p.parse_args()
def create_directory(url, dir):
    """Create (if needed) and return the per-user download directory.

    The directory is named after the profile username, i.e. the last
    path component of *url* with any ``#fragment`` stripped.  When *dir*
    is None the current working directory is used as the base.
    """
    base = os.getcwd() if dir is None else dir
    # Get username from supplied url. Create destination directory with it
    username = url.split('/')[-1].split('#')[0]
    # os.path.join handles bases with or without a trailing separator,
    # and exist_ok avoids the check-then-create race of the original.
    path = os.path.join(base, username)
    os.makedirs(path, exist_ok=True)
    return path
def save_image(link, path, v):
    """Download one image into *path*, skipping files that already exist.

    *link* is a direct image URL, *path* the destination directory and
    *v* enables per-file progress output (green = skipped, white = new).
    """
    filename = link.rsplit('/', 1)[-1]
    target = path + '/' + filename
    # Already downloaded on a previous run - nothing to do.
    if os.path.exists(target):
        if v is True:
            print(colored(target, 'green'))
        return
    # Privatter, unlike poipiku, does not host images themselves, so no
    # authenticated session is needed for the actual image fetch.
    response = requests.get(link, stream=True)
    if response.status_code == 200:
        with open(target, 'wb') as out:
            for chunk in response:
                out.write(chunk)
        if v is True:
            print(colored(target, 'white'))
def create_session(username, password):
    """Return a requests session logged in to Privatter.

    The login endpoint sets authentication cookies on the session, so
    subsequent requests through it are authenticated.
    """
    session = requests.Session()
    session.headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',
        'Host': 'privatter.net'
    }
    # Probably a really bad way to handle passwords... But we need to generate
    # a unique login per session.  Cookies CAN be used, but it's much easier
    # to just use plain username and password.
    credentials = {
        'mode': 'login',
        'login_id': username,
        'password': password
    }
    session.post('https://privatter.net/login_pass', data=credentials)
    return session
def get_image_sites(s, url):
    """Return the profile's post-page URLs, oldest first.

    *s* is an authenticated session and *url* the profile page; each
    ``pull-left`` element's href is extracted and made absolute.
    """
    response = s.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    links = []
    for element in soup.findAll(class_="pull-left"):
        href = str(element).split('href="')[1].split('">')[0]
        links.append('https://privatter.net' + href)
    # The page lists newest first; reverse so downloads run oldest first.
    links.reverse()
    return links
def get_image_direct_link(s, url, path, v):
    """Download every direct image linked from one post page.

    Fetches *url* with the authenticated session *s*, pulls the href out
    of each ``image``-classed element and hands it to save_image.
    """
    page = s.get(url)
    soup = BeautifulSoup(page.text, 'html.parser')
    for element in soup.findAll(class_="image"):
        direct = str(element).split('href="')[1].split('"')[0]
        save_image(direct, path, v)
async def main():
    """Log in, discover all post pages and download them concurrently.

    Each post page is processed by get_image_direct_link on a worker
    thread (the requests session is shared between workers).
    """
    a = parse_args()
    with create_session(a.username, a.password) as s:
        path = create_directory(a.url, a.directory)
        links = get_image_sites(s, a.url)
        threads = 1 if a.threads is None else int(a.threads)
        with concurrent.futures.ThreadPoolExecutor(threads) as executor:
            # get_running_loop() replaces the deprecated
            # get_event_loop() call inside a coroutine.
            loop = asyncio.get_running_loop()
            tasks = [
                loop.run_in_executor(
                    executor,
                    get_image_direct_link,
                    *(s, link, path, a.verbose)
                )
                for link in links
            ]
            # Wait for every page download to finish; per-task results
            # are None and were previously discarded by a no-op loop.
            await asyncio.gather(*tasks)
if __name__ == "__main__":
    # Create and install an event loop explicitly (rather than using
    # asyncio.run) so main() can look it up while scheduling executor jobs.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(main())
    loop.close()
from termcolor import colored
from bs4 import BeautifulSoup
def parse_args():
p = argparse.ArgumentParser()
p.add_argument("-d", dest="directory", required=False, help="destination where all images will be saved")
p.add_argument("-u", dest="url", required=True, help="url for profile to be downloaded")
p.add_argument("-v", dest="verbose", required=False, default=True, help="specifies verbosity to true or false. Default true")
p.add_argument("-U", dest="username", required=True, help="Username for login to Privatter")
p.add_argument("-P", dest="password", required=True, help="Password for login to Privatter")
p.add_argument("-t", dest="threads", required=False, help="Amount of threads to spawn while downloading. Default is 1")
return p.parse_args()
def create_directory(url, dir):
p = os.getcwd() if dir is None else dir
if not p.endswith('/'):
p = p + '/'
p = p + url.split('/')[-1].split('#')[0]
if not os.path.exists(p):
os.makedirs(p)
return p
def save_image(link, path, v):
name = link.rsplit('/', 1)[-1]
path = path + '/' + name
if os.path.exists(path):
if v is True:
print(colored(path, 'green'))
return
r = requests.get(link, stream=True)
if r.status_code == 200:
with open(path, 'wb') as f:
for c in r:
f.write(c)
if v is True:
print(colored(path, 'white'))
def create_session(username, password):
s = requests.Session()
s.headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',
'Host': 'privatter.net'
}
payload = {
'mode': 'login',
'login_id': username,
'password': password
}
s.post('https://privatter.net/login_pass', data=payload)
return s
def get_image_sites(s, url):
r = s.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
pages = soup.findAll(class_="pull-left")
links = ['https://privatter.net' + str(page).split('href="')[1].split('">')[0] for page in pages]
return links[::-1]
def get_image_direct_link(s, url, path, v):
r = s.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
direct_links = soup.findAll(class_="image")
for link in direct_links:
link = str(link).split('href="')[1].split('"')[0]
save_image(link, path, v)
async def main():
a = parse_args()
with create_session(a.username, a.password) as s:
path = create_directory(a.url, a.directory)
links = get_image_sites(s, a.url)
threads = 1 if a.threads is None else int(a.threads)
with concurrent.futures.ThreadPoolExecutor(threads) as executor:
loop = asyncio.get_event_loop()
tasks = [
loop.run_in_executor(
executor,
get_image_direct_link,
*(s, link, path, a.verbose)
)
for link in links
]
for response in await asyncio.gather(*tasks):
pass
if __name__ == "__main__":
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(main())
loop.close() | true | true |
f73e750bba87126ae1daf9bf2beb2f6676d31d68 | 440 | py | Python | test/rules/test_drowning_adult.py | rileyhazard/SmartVA-Analyze-1 | 0573eeff27d03f54e7506db4f1631c0cd9f54bbb | [
"MIT"
] | 4 | 2019-01-23T12:57:47.000Z | 2020-04-18T17:13:08.000Z | test/rules/test_drowning_adult.py | rileyhazard/SmartVA-Analyze-1 | 0573eeff27d03f54e7506db4f1631c0cd9f54bbb | [
"MIT"
] | 4 | 2019-01-09T22:10:07.000Z | 2022-02-16T04:57:06.000Z | test/rules/test_drowning_adult.py | rileyhazard/SmartVA-Analyze-1 | 0573eeff27d03f54e7506db4f1631c0cd9f54bbb | [
"MIT"
] | 11 | 2018-12-11T22:01:13.000Z | 2022-01-07T11:38:02.000Z | from smartva.rules import drowning_adult as drowning
from smartva.data.constants import *
VA = Adult
def test_pass():
row = {
VA.DROWNING: YES,
VA.INJURY_DAYS: 0,
}
assert drowning.logic_rule(row) is True
def test_fail_drowning():
row = {
VA.DROWNING: NO,
}
assert drowning.logic_rule(row) is False
def test_fail_no_data():
row = {}
assert drowning.logic_rule(row) is False
| 15.714286 | 52 | 0.647727 | from smartva.rules import drowning_adult as drowning
from smartva.data.constants import *
VA = Adult
def test_pass():
row = {
VA.DROWNING: YES,
VA.INJURY_DAYS: 0,
}
assert drowning.logic_rule(row) is True
def test_fail_drowning():
row = {
VA.DROWNING: NO,
}
assert drowning.logic_rule(row) is False
def test_fail_no_data():
row = {}
assert drowning.logic_rule(row) is False
| true | true |
f73e769bb9fa59c409f9cdcfe1e948ce5c05841e | 453 | py | Python | data/scripts/templates/object/tangible/wearables/boots/shared_boots_s05.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/tangible/wearables/boots/shared_boots_s05.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/tangible/wearables/boots/shared_boots_s05.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/boots/shared_boots_s05.iff"
result.attribute_template_id = 11
result.stfName("wearables_name","boots_s05")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 26.647059 | 73 | 0.730684 | true | true | |
f73e771f59e4641872758fb300a5023ef1df2a59 | 6,116 | py | Python | mmdet3d/models/backbones/second_ran.py | xiaoMrzhang/mmdetection3d | 1e7695297e60afe3e09834de1582c3437086ed49 | [
"Apache-2.0"
] | null | null | null | mmdet3d/models/backbones/second_ran.py | xiaoMrzhang/mmdetection3d | 1e7695297e60afe3e09834de1582c3437086ed49 | [
"Apache-2.0"
] | null | null | null | mmdet3d/models/backbones/second_ran.py | xiaoMrzhang/mmdetection3d | 1e7695297e60afe3e09834de1582c3437086ed49 | [
"Apache-2.0"
] | 1 | 2021-07-21T05:17:24.000Z | 2021-07-21T05:17:24.000Z | from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.runner import load_checkpoint, force_fp32
from torch import nn as nn
import torch
import numpy as np
from mmdet.models import BACKBONES
from mmdet3d.utils.soft_mask import SoftMask
@BACKBONES.register_module()
class SECOND_RAN(nn.Module):
    """Backbone network for SECOND with a residual-attention soft mask.

    The multi-stage SECOND trunk is unchanged; in parallel the input BEV
    feature map is downsampled once and passed through a ``SoftMask``
    module that predicts per-stage attention maps. The masks are returned
    alongside the stage features and can be supervised with :meth:`loss`
    or :meth:`focal_loss`.

    Args:
        in_channels (int): Input channels.
        out_channels (list[int]): Output channels for multi-scale feature maps.
        layer_nums (list[int]): Number of layers in each stage.
        layer_strides (list[int]): Strides of each stage.
        norm_cfg (dict): Config dict of normalization layers.
        conv_cfg (dict): Config dict of convolutional layers.
    """

    def __init__(self,
                 in_channels=128,
                 out_channels=[128, 128, 256],
                 layer_nums=[3, 5, 5],
                 layer_strides=[2, 2, 2],
                 norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
                 conv_cfg=dict(type='Conv2d', bias=False)):
        super(SECOND_RAN, self).__init__()
        assert len(layer_strides) == len(layer_nums)
        assert len(out_channels) == len(layer_nums)

        in_filters = [in_channels, *out_channels[:-1]]
        # note that when stride > 1, conv2d with same padding isn't
        # equal to pad-conv2d. we should use pad-conv2d.
        blocks = []
        for i, layer_num in enumerate(layer_nums):
            block = [
                build_conv_layer(
                    conv_cfg,
                    in_filters[i],
                    out_channels[i],
                    3,
                    stride=layer_strides[i],
                    padding=1),
                build_norm_layer(norm_cfg, out_channels[i])[1],
                nn.ReLU(inplace=True),
            ]
            for j in range(layer_num):
                block.append(
                    build_conv_layer(
                        conv_cfg,
                        out_channels[i],
                        out_channels[i],
                        3,
                        padding=1))
                block.append(build_norm_layer(norm_cfg, out_channels[i])[1])
                block.append(nn.ReLU(inplace=True))

            block = nn.Sequential(*block)
            blocks.append(block)

        self.blocks = nn.ModuleList(blocks)

        # Soft-mask branch: one strided conv to match the first stage's
        # resolution, then the residual-attention mask generator.
        first_layer_conv = build_conv_layer(
            conv_cfg,
            in_filters[0],
            out_channels[0],
            3,
            stride=2,
            padding=1)
        first_bn = build_norm_layer(norm_cfg, out_channels[0])[1]
        first_relu = nn.ReLU(inplace=True)
        soft_mask = SoftMask(in_channels, [128, 128, 128], out_type=4)
        self.soft_mask_block = nn.Sequential(first_layer_conv, first_bn,
                                             first_relu, soft_mask)

    def init_weights(self, pretrained=None):
        """Initialize weights of the 2D backbone.

        Args:
            pretrained (str, optional): Path to a pretrained checkpoint.
                Conv layers are deliberately left at their default init to
                follow the original implementation.
        """
        if isinstance(pretrained, str):
            from mmdet3d.utils import get_root_logger
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)

    def forward(self, x):
        """Forward function.

        Args:
            x (torch.Tensor): Input with shape (N, C, H, W).

        Returns:
            tuple[list[torch.Tensor], torch.Tensor]: Multi-scale stage
            features and the predicted soft masks.
        """
        masks = self.soft_mask_block(x)
        outs = []
        for i in range(len(self.blocks)):
            x = self.blocks[i](x)
            # NOTE: residual application of the mask to the stage features
            # is currently disabled: x = torch.mul(x, masks[i]) + x
            outs.append(x)
        return tuple([outs, masks])

    # apply_to must be a tuple of argument names; the original passed the
    # bare string ('prediction'), which only worked through accidental
    # substring membership and would also match any arg whose name is a
    # substring of "prediction".
    @force_fp32(apply_to=('prediction', ))
    def focal_loss(self, prediction, target):
        """Gaussian focal loss (CornerNet-style) on the predicted mask.

        Args:
            prediction (torch.Tensor): Predicted heatmap, values in [0, 1].
            target (torch.Tensor): Gaussian-smoothed target heatmap; cells
                equal to 1 are positives.

        Returns:
            dict: Single ``loss_heatmap`` term.
        """
        loss_dict = dict()
        self.alpha = 2
        self.beta = 4
        positive_index = target.eq(1).float()
        negative_index = target.lt(1).float()
        # Down-weight negatives near a positive center (CornerNet trick).
        negative_weights = torch.pow(1 - target, self.beta)
        loss = 0.

        positive_loss = torch.log(prediction + 1e-6) \
            * torch.pow(1 - prediction, self.alpha) * positive_index
        negative_loss = torch.log(1 - prediction + 1e-6) \
            * torch.pow(prediction, self.alpha) * negative_weights * negative_index

        num_positive = positive_index.float().sum()
        positive_loss = positive_loss.sum()
        negative_loss = negative_loss.sum()

        if num_positive == 0:
            loss -= negative_loss
        else:
            loss -= (positive_loss + negative_loss) / num_positive
        loss_dict["loss_heatmap"] = loss
        return loss_dict

    @force_fp32(apply_to=('prediction', ))
    def loss(self, prediction, target):
        """Balanced binary cross-entropy on the predicted mask.

        Positive and negative cells are averaged separately so the (rare)
        positives are not drowned out by the background; the +1 in each
        denominator guards against empty classes.

        Args:
            prediction (torch.Tensor): Predicted heatmap, values in [0, 1].
            target (torch.Tensor): Target heatmap; cells equal to 1 are
                positives.

        Returns:
            dict: Single ``loss_heatmap`` term.
        """
        loss_dict = dict()
        positive_index = target.eq(1).float()
        positive_loss = torch.log(prediction + 1e-6) * positive_index
        negative_loss = torch.log(1 - prediction + 1e-6) * (1 - positive_index)
        num_positive = positive_index.float().sum()
        num_negative = (1 - positive_index).float().sum()
        positive_loss = positive_loss.sum()
        negative_loss = negative_loss.sum()
        bce_loss = -(positive_loss / (num_positive + 1) +
                     negative_loss / (num_negative + 1))
        loss_dict["loss_heatmap"] = bce_loss
        return loss_dict
| 37.753086 | 109 | 0.571779 | from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.runner import load_checkpoint, force_fp32
from torch import nn as nn
import torch
import numpy as np
from mmdet.models import BACKBONES
from mmdet3d.utils.soft_mask import SoftMask
@BACKBONES.register_module()
class SECOND_RAN(nn.Module):
def __init__(self,
in_channels=128,
out_channels=[128, 128, 256],
layer_nums=[3, 5, 5],
layer_strides=[2, 2, 2],
norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
conv_cfg=dict(type='Conv2d', bias=False)):
super(SECOND_RAN, self).__init__()
assert len(layer_strides) == len(layer_nums)
assert len(out_channels) == len(layer_nums)
in_filters = [in_channels, *out_channels[:-1]]
# equal to pad-conv2d. we should use pad-conv2d.
blocks = []
for i, layer_num in enumerate(layer_nums):
block = [
build_conv_layer(
conv_cfg,
in_filters[i],
out_channels[i],
3,
stride=layer_strides[i],
padding=1),
build_norm_layer(norm_cfg, out_channels[i])[1],
nn.ReLU(inplace=True),
]
for j in range(layer_num):
block.append(
build_conv_layer(
conv_cfg,
out_channels[i],
out_channels[i],
3,
padding=1))
block.append(build_norm_layer(norm_cfg, out_channels[i])[1])
block.append(nn.ReLU(inplace=True))
block = nn.Sequential(*block)
blocks.append(block)
self.blocks = nn.ModuleList(blocks)
first_layer_conv = build_conv_layer(
conv_cfg,
in_filters[0],
out_channels[0],
3,
stride=2,
padding=1)
first_bn = build_norm_layer(norm_cfg, out_channels[0])[1]
first_relu = nn.ReLU(inplace=True)
soft_mask = SoftMask(in_channels, [128, 128, 128], out_type=4)
self.soft_mask_block = nn.Sequential(first_layer_conv, first_bn, first_relu, soft_mask)
def init_weights(self, pretrained=None):
# Do not initialize the conv layers
# to follow the original implementation
if isinstance(pretrained, str):
from mmdet3d.utils import get_root_logger
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
def forward(self, x):
masks = self.soft_mask_block(x)
outs = []
for i in range(len(self.blocks)):
x = self.blocks[i](x)
# x = torch.mul(x, masks[i]) + x
outs.append(x)
return tuple([outs, masks])
@force_fp32(apply_to=('prediction'))
def focal_loss(self, prediction, target):
loss_dict = dict()
self.alpha = 2
self.beta = 4
positive_index = target.eq(1).float()
negative_index = target.lt(1).float()
negative_weights = torch.pow(1 - target, self.beta)
loss = 0.
# prediction = torch.clamp(prediction, 1e-3, .999)
positive_loss = torch.log(prediction + 1e-6) \
* torch.pow(1 - prediction, self.alpha) * positive_index
negative_loss = torch.log(1 - prediction + 1e-6) \
* torch.pow(prediction, self.alpha) * negative_weights * negative_index
num_positive = positive_index.float().sum()
positive_loss = positive_loss.sum()
negative_loss = negative_loss.sum()
if num_positive == 0:
loss -= negative_loss
else:
loss -= (positive_loss + negative_loss) / num_positive
loss_dict["loss_heatmap"] = loss
# dice loss
# intersection = (target * prediction).sum(axis=[1,2,3])
# dice_score = (2 * intersection + 1) / (target.sum(axis=[1,2,3]) + prediction.sum(axis=[1,2,3]) + 1)
# dice_loss = 1 - torch.mean(dice_score, axis=0)
# loss_dict["loss_dice"] = dice_loss * 0.2
# if torch.isnan(loss) or torch.isnan(dice_loss):
# import pdb;pdb.set_trace()
return loss_dict
@force_fp32(apply_to=('prediction'))
def loss(self, prediction, target):
positive_index = target.eq(1).float()
loss = 0.
loss_dict = dict()
positive_loss = torch.log(prediction + 1e-6) * positive_index
negative_loss = torch.log(1 - prediction + 1e-6) * (1 - positive_index)
num_positive = positive_index.float().sum()
num_negative = (1 - positive_index).float().sum()
positive_loss = positive_loss.sum()
negative_loss = negative_loss.sum()
bec_loss = -(positive_loss / (num_positive+1) + negative_loss / (num_negative+1))
loss_dict["loss_heatmap"] = bec_loss
# intersection = (target * prediction).sum(axis=[1,2,3])
# dice_score = (2 * intersection + 1) / (target.sum(axis=[1,2,3]) + prediction.sum(axis=[1,2,3]) + 1)
# dice_loss = 1 - dice_score.mean()
# loss_dict["loss_dice"] = dice_loss
return loss_dict
| true | true |
f73e77302b49e772ad9640b447d18902ceafc55a | 6,645 | py | Python | cryptograph/shuffled_shift_cipher.py | slowy07/pythonApps | 22f9766291dbccd8185035745950c5ee4ebd6a3e | [
"MIT"
] | 10 | 2020-10-09T11:05:18.000Z | 2022-02-13T03:22:10.000Z | cryptograph/shuffled_shift_cipher.py | khairanabila/pythonApps | f90b8823f939b98f7bf1dea7ed35fe6e22e2f730 | [
"MIT"
] | null | null | null | cryptograph/shuffled_shift_cipher.py | khairanabila/pythonApps | f90b8823f939b98f7bf1dea7ed35fe6e22e2f730 | [
"MIT"
] | 6 | 2020-11-26T12:49:43.000Z | 2022-03-06T06:46:43.000Z | import random
import string
class ShuffledShiftCipher:
"""
This algorithm uses the Caesar Cipher algorithm but removes the option to
use brute force to decrypt the message.
The passcode is a a random password from the selection buffer of
1. uppercase letters of the English alphabet
2. lowercase letters of the English alphabet
3. digits from 0 to 9
Using unique characters from the passcode, the normal list of characters,
that can be allowed in the plaintext, is pivoted and shuffled. Refer to docstring
of __make_key_list() to learn more about the shuffling.
Then, using the passcode, a number is calculated which is used to encrypt the
plaintext message with the normal shift cipher method, only in this case, the
reference, to look back at while decrypting, is shuffled.
Each cipher object can possess an optional argument as passcode, without which a
new passcode is generated for that object automatically.
cip1 = ShuffledShiftCipher('d4usr9TWxw9wMD')
cip2 = ShuffledShiftCipher()
"""
def __init__(self, passcode: str = None):
"""
Initializes a cipher object with a passcode as it's entity
Note: No new passcode is generated if user provides a passcode
while creating the object
"""
self.__passcode = passcode or self.__passcode_creator()
self.__key_list = self.__make_key_list()
self.__shift_key = self.__make_shift_key()
def __str__(self):
"""
:return: passcode of the cipher object
"""
return "Passcode is: " + "".join(self.__passcode)
def __neg_pos(self, iterlist: list) -> list:
"""
Mutates the list by changing the sign of each alternate element
:param iterlist: takes a list iterable
:return: the mutated list
"""
for i in range(1, len(iterlist), 2):
iterlist[i] *= -1
return iterlist
def __passcode_creator(self) -> list:
"""
Creates a random password from the selection buffer of
1. uppercase letters of the English alphabet
2. lowercase letters of the English alphabet
3. digits from 0 to 9
:rtype: list
:return: a password of a random length between 10 to 20
"""
choices = string.ascii_letters + string.digits
password = [random.choice(choices) for i in range(random.randint(10, 20))]
return password
def __make_key_list(self) -> list:
"""
Shuffles the ordered character choices by pivoting at breakpoints
Breakpoints are the set of characters in the passcode
eg:
if, ABCDEFGHIJKLMNOPQRSTUVWXYZ are the possible characters
and CAMERA is the passcode
then, breakpoints = [A,C,E,M,R] # sorted set of characters from passcode
shuffled parts: [A,CB,ED,MLKJIHGF,RQPON,ZYXWVUTS]
shuffled __key_list : ACBEDMLKJIHGFRQPONZYXWVUTS
Shuffling only 26 letters of the english alphabet can generate 26!
combinations for the shuffled list. In the program we consider, a set of
97 characters (including letters, digits, punctuation and whitespaces),
thereby creating a possibility of 97! combinations (which is a 152 digit number
in itself), thus diminishing the possibility of a brute force approach.
Moreover, shift keys even introduce a multiple of 26 for a brute force approach
for each of the already 97! combinations.
"""
# key_list_options contain nearly all printable except few elements from
# string.whitespace
key_list_options = (
string.ascii_letters + string.digits + string.punctuation + " \t\n"
)
keys_l = []
# creates points known as breakpoints to break the key_list_options at those
# points and pivot each substring
breakpoints = sorted(set(self.__passcode))
temp_list = []
# algorithm for creating a new shuffled list, keys_l, out of key_list_options
for i in key_list_options:
temp_list.extend(i)
# checking breakpoints at which to pivot temporary sublist and add it into
# keys_l
if i in breakpoints or i == key_list_options[-1]:
keys_l.extend(temp_list[::-1])
temp_list = []
# returning a shuffled keys_l to prevent brute force guessing of shift key
return keys_l
def __make_shift_key(self) -> int:
"""
sum() of the mutated list of ascii values of all characters where the
mutated list is the one returned by __neg_pos()
"""
num = sum(self.__neg_pos([ord(x) for x in self.__passcode]))
return num if num > 0 else len(self.__passcode)
def decrypt(self, encoded_message: str) -> str:
"""
Performs shifting of the encoded_message w.r.t. the shuffled __key_list
to create the decoded_message
>>> ssc = ShuffledShiftCipher('4PYIXyqeQZr44')
>>> ssc.decrypt("d>**-1z6&'5z'5z:z+-='$'>=zp:>5:#z<'.&>#")
'Hello, this is a modified Caesar cipher'
"""
decoded_message = ""
# decoding shift like Caesar cipher algorithm implementing negative shift or
# reverse shift or left shift
for i in encoded_message:
position = self.__key_list.index(i)
decoded_message += self.__key_list[
(position - self.__shift_key) % -len(self.__key_list)
]
return decoded_message
def encrypt(self, plaintext: str) -> str:
"""
Performs shifting of the plaintext w.r.t. the shuffled __key_list
to create the encoded_message
>>> ssc = ShuffledShiftCipher('4PYIXyqeQZr44')
>>> ssc.encrypt('Hello, this is a modified Caesar cipher')
"d>**-1z6&'5z'5z:z+-='$'>=zp:>5:#z<'.&>#"
"""
encoded_message = ""
# encoding shift like Caesar cipher algorithm implementing positive shift or
# forward shift or right shift
for i in plaintext:
position = self.__key_list.index(i)
encoded_message += self.__key_list[
(position + self.__shift_key) % len(self.__key_list)
]
return encoded_message
def test_end_to_end(msg: str = "Hello, this is a modified Caesar cipher"):
    """Round-trip *msg* through a freshly generated cipher.

    >>> test_end_to_end()
    'Hello, this is a modified Caesar cipher'
    """
    cipher = ShuffledShiftCipher()
    return cipher.decrypt(cipher.encrypt(msg))
if __name__ == "__main__":
    # Exercise the embedded doctests when run directly as a script.
    import doctest

    doctest.testmod()
import string
class ShuffledShiftCipher:
def __init__(self, passcode: str = None):
self.__passcode = passcode or self.__passcode_creator()
self.__key_list = self.__make_key_list()
self.__shift_key = self.__make_shift_key()
def __str__(self):
return "Passcode is: " + "".join(self.__passcode)
def __neg_pos(self, iterlist: list) -> list:
for i in range(1, len(iterlist), 2):
iterlist[i] *= -1
return iterlist
def __passcode_creator(self) -> list:
choices = string.ascii_letters + string.digits
password = [random.choice(choices) for i in range(random.randint(10, 20))]
return password
def __make_key_list(self) -> list:
key_list_options = (
string.ascii_letters + string.digits + string.punctuation + " \t\n"
)
keys_l = []
breakpoints = sorted(set(self.__passcode))
temp_list = []
for i in key_list_options:
temp_list.extend(i)
if i in breakpoints or i == key_list_options[-1]:
keys_l.extend(temp_list[::-1])
temp_list = []
return keys_l
def __make_shift_key(self) -> int:
num = sum(self.__neg_pos([ord(x) for x in self.__passcode]))
return num if num > 0 else len(self.__passcode)
def decrypt(self, encoded_message: str) -> str:
decoded_message = ""
for i in encoded_message:
position = self.__key_list.index(i)
decoded_message += self.__key_list[
(position - self.__shift_key) % -len(self.__key_list)
]
return decoded_message
def encrypt(self, plaintext: str) -> str:
encoded_message = ""
for i in plaintext:
position = self.__key_list.index(i)
encoded_message += self.__key_list[
(position + self.__shift_key) % len(self.__key_list)
]
return encoded_message
def test_end_to_end(msg: str = "Hello, this is a modified Caesar cipher"):
cip1 = ShuffledShiftCipher()
return cip1.decrypt(cip1.encrypt(msg))
if __name__ == "__main__":
import doctest
doctest.testmod() | true | true |
f73e773e60a9f42784cc575883483b797c6f12bc | 6,091 | py | Python | mac/google-cloud-sdk/lib/third_party/kubernetes/client/models/v1_config_map_list.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | mac/google-cloud-sdk/lib/third_party/kubernetes/client/models/v1_config_map_list.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | 11 | 2020-02-29T02:51:12.000Z | 2022-03-30T23:20:08.000Z | mac/google-cloud-sdk/lib/third_party/kubernetes/client/models/v1_config_map_list.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen
https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ConfigMapList(object):
    """Swagger model for a Kubernetes ConfigMapList resource.

    Restyled version of a swagger-codegen class; the public attribute
    surface (properties, ``to_dict``/``to_str``, equality) is unchanged.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    swagger_types = {
        'api_version': 'str',
        'items': 'list[V1ConfigMap]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None):
        """V1ConfigMapList - a model defined in Swagger."""
        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        # ``items`` is a required field; its setter rejects None.
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """str: APIVersion defines the versioned schema of this
        representation of an object."""
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the api_version of this V1ConfigMapList."""
        self._api_version = api_version

    @property
    def items(self):
        """list[V1ConfigMap]: the list of ConfigMaps."""
        return self._items

    @items.setter
    def items(self, items):
        """Set the items of this V1ConfigMapList; None is rejected."""
        if items is None:
            raise ValueError('Invalid value for `items`, must not be `None`')
        self._items = items

    @property
    def kind(self):
        """str: Kind is the REST resource this object represents."""
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind of this V1ConfigMapList."""
        self._kind = kind

    @property
    def metadata(self):
        """V1ListMeta: standard list metadata."""
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the metadata of this V1ConfigMapList."""
        self._metadata = metadata

    def to_dict(self):
        """Return the model properties as a dict."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, 'to_dict') else item
                    for item in value
                ]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, 'to_dict') else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, V1ConfigMapList):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| 27.686364 | 86 | 0.633394 |
from pprint import pformat
from six import iteritems
import re
class V1ConfigMapList(object):
swagger_types = {
'api_version': 'str',
'items': 'list[V1ConfigMap]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, api_version):
self._api_version = api_version
@property
def items(self):
return self._items
@items.setter
def items(self, items):
if items is None:
raise ValueError('Invalid value for `items`, must not be `None`')
self._items = items
@property
def kind(self):
return self._kind
@kind.setter
def kind(self, kind):
self._kind = kind
@property
def metadata(self):
return self._metadata
@metadata.setter
def metadata(self, metadata):
self._metadata = metadata
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, 'to_dict') else x, value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], 'to_dict') else item, value.items()))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, V1ConfigMapList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f73e778263be173557d061221305df04791bb62a | 4,540 | py | Python | tests/test_models/test_recognizers/test_recognizer2d.py | ZJU-lishuang/mmaction2 | dc46505319968eff0123eb5abb811969042377c5 | [
"Apache-2.0"
] | 2 | 2021-12-07T09:09:10.000Z | 2022-01-26T01:52:07.000Z | tests/test_models/test_recognizers/test_recognizer2d.py | github-zbx/mmaction2 | ee34d952e792fd1adea2c2e397b29faff68eaec9 | [
"Apache-2.0"
] | null | null | null | tests/test_models/test_recognizers/test_recognizer2d.py | github-zbx/mmaction2 | ee34d952e792fd1adea2c2e397b29faff68eaec9 | [
"Apache-2.0"
] | null | null | null | import torch
from mmaction.models import build_recognizer
from ..base import generate_recognizer_demo_inputs, get_recognizer_cfg
def test_tsn():
config = get_recognizer_cfg('tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
input_shape = (1, 3, 3, 32, 32)
demo_inputs = generate_recognizer_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
# Test forward gradcam
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
def test_tsm():
    """Smoke-test the TSM recognizer, incl. twice-sample + 3-crop input."""
    cfg = get_recognizer_cfg('tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py')
    cfg.model['backbone']['pretrained'] = None

    recognizer = build_recognizer(cfg.model)

    demo_inputs = generate_recognizer_demo_inputs((1, 8, 3, 32, 32))
    imgs, gt_labels = demo_inputs['imgs'], demo_inputs['gt_labels']

    # Training forward must return a dict of loss terms.
    assert isinstance(recognizer(imgs, gt_labels), dict)

    # Inference forward.
    with torch.no_grad():
        batches = [img[None, :] for img in imgs]
        for batch in batches:
            recognizer(batch, None, return_loss=False)

    # Twice sample + 3 crops: rebuild with prob averaging at test time.
    demo_inputs = generate_recognizer_demo_inputs((2, 48, 3, 32, 32))
    imgs = demo_inputs['imgs']
    cfg.model.test_cfg = dict(average_clips='prob')
    recognizer = build_recognizer(cfg.model)

    # Inference forward on the multi-clip input.
    with torch.no_grad():
        batches = [img[None, :] for img in imgs]
        for batch in batches:
            recognizer(batch, None, return_loss=False)

    # Grad-CAM forward.
    recognizer(imgs, gradcam=True)
    for batch in batches:
        recognizer(batch, gradcam=True)
def test_tpn():
    """Smoke-test the TPN recognizer: losses, inference, grad-CAM, dummy."""
    cfg = get_recognizer_cfg('tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py')
    cfg.model['backbone']['pretrained'] = None

    recognizer = build_recognizer(cfg.model)

    demo_inputs = generate_recognizer_demo_inputs((1, 8, 3, 224, 224))
    imgs, gt_labels = demo_inputs['imgs'], demo_inputs['gt_labels']

    losses = recognizer(imgs, gt_labels)
    assert isinstance(losses, dict)
    # TPN adds an auxiliary head, so both loss terms must be present.
    assert 'loss_aux' in losses and 'loss_cls' in losses

    # Inference forward.
    with torch.no_grad():
        batches = [img[None, :] for img in imgs]
        for batch in batches:
            recognizer(batch, None, return_loss=False)

    # Grad-CAM forward.
    recognizer(imgs, gradcam=True)
    for batch in batches:
        recognizer(batch, gradcam=True)

    # forward_dummy (used e.g. for FLOPs counting) on a fresh instance.
    with torch.no_grad():
        dummy_model = build_recognizer(cfg.model)
        batches = [img[None, :] for img in imgs]
        if hasattr(dummy_model, 'forward_dummy'):
            dummy_model.forward = dummy_model.forward_dummy
        for batch in batches:
            dummy_model(batch)
def test_tanet():
    """Smoke-test the TANet recognizer, incl. twice-sample + 3-crop input."""
    cfg = get_recognizer_cfg(
        'tanet/tanet_r50_dense_1x1x8_100e_kinetics400_rgb.py')
    cfg.model['backbone']['pretrained'] = None

    recognizer = build_recognizer(cfg.model)

    demo_inputs = generate_recognizer_demo_inputs((1, 8, 3, 32, 32))
    imgs, gt_labels = demo_inputs['imgs'], demo_inputs['gt_labels']

    # Training forward must return a dict of loss terms.
    assert isinstance(recognizer(imgs, gt_labels), dict)

    # Inference forward.
    with torch.no_grad():
        batches = [img[None, :] for img in imgs]
        for batch in batches:
            recognizer(batch, None, return_loss=False)

    # Twice sample + 3 crops: rebuild with prob averaging at test time.
    demo_inputs = generate_recognizer_demo_inputs((2, 48, 3, 32, 32))
    imgs = demo_inputs['imgs']
    cfg.model.test_cfg = dict(average_clips='prob')
    recognizer = build_recognizer(cfg.model)

    # Inference forward on the multi-clip input.
    with torch.no_grad():
        batches = [img[None, :] for img in imgs]
        for batch in batches:
            recognizer(batch, None, return_loss=False)

    # Grad-CAM forward.
    recognizer(imgs, gradcam=True)
    for batch in batches:
        recognizer(batch, gradcam=True)
| 29.868421 | 76 | 0.678414 | import torch
from mmaction.models import build_recognizer
from ..base import generate_recognizer_demo_inputs, get_recognizer_cfg
def test_tsn():
config = get_recognizer_cfg('tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py')
config.model['backbone']['pretrained'] = None
recognizer = build_recognizer(config.model)
input_shape = (1, 3, 3, 32, 32)
demo_inputs = generate_recognizer_demo_inputs(input_shape)
imgs = demo_inputs['imgs']
gt_labels = demo_inputs['gt_labels']
losses = recognizer(imgs, gt_labels)
assert isinstance(losses, dict)
with torch.no_grad():
img_list = [img[None, :] for img in imgs]
for one_img in img_list:
recognizer(one_img, None, return_loss=False)
recognizer(imgs, gradcam=True)
for one_img in img_list:
recognizer(one_img, gradcam=True)
def test_tsm():
    """Smoke-test the TSM recognizer: training losses, inference and Grad-CAM."""
    config = get_recognizer_cfg('tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py')
    # Do not load pretrained backbone weights in a unit test
    config.model['backbone']['pretrained'] = None
    recognizer = build_recognizer(config.model)

    # Small dummy inputs keep the test fast
    input_shape = (1, 8, 3, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape)

    imgs = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']

    # Training-mode forward pass must return a dict of losses
    losses = recognizer(imgs, gt_labels)
    assert isinstance(losses, dict)

    # Inference-mode forward pass (no gradients, one sample at a time)
    with torch.no_grad():
        img_list = [img[None, :] for img in imgs]
        for one_img in img_list:
            recognizer(one_img, None, return_loss=False)

    # Rebuild with a larger clip count and probability-averaged clip scores
    # (presumably the twice-sample + three-crop test protocol — confirm
    # against the config file)
    input_shape = (2, 48, 3, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape)
    imgs = demo_inputs['imgs']
    config.model.test_cfg = dict(average_clips='prob')
    recognizer = build_recognizer(config.model)

    # Inference-mode forward pass on the rebuilt model
    with torch.no_grad():
        img_list = [img[None, :] for img in imgs]
        for one_img in img_list:
            recognizer(one_img, None, return_loss=False)

    # Grad-CAM forward pass
    recognizer(imgs, gradcam=True)
    for one_img in img_list:
        recognizer(one_img, gradcam=True)
def test_tpn():
    """Smoke-test the TPN recognizer: losses, inference, Grad-CAM, dummy forward."""
    config = get_recognizer_cfg('tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py')
    # Do not load pretrained backbone weights in a unit test
    config.model['backbone']['pretrained'] = None
    recognizer = build_recognizer(config.model)

    input_shape = (1, 8, 3, 224, 224)
    demo_inputs = generate_recognizer_demo_inputs(input_shape)

    imgs = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']

    # TPN must report both an auxiliary loss and a classification loss
    losses = recognizer(imgs, gt_labels)
    assert isinstance(losses, dict)
    assert 'loss_aux' in losses and 'loss_cls' in losses

    # Inference-mode forward pass (no gradients, one sample at a time)
    with torch.no_grad():
        img_list = [img[None, :] for img in imgs]
        for one_img in img_list:
            recognizer(one_img, None, return_loss=False)

    # Grad-CAM forward pass
    recognizer(imgs, gradcam=True)
    for one_img in img_list:
        recognizer(one_img, gradcam=True)

    # Exercise forward_dummy via __call__ when the model provides it
    with torch.no_grad():
        _recognizer = build_recognizer(config.model)
        img_list = [img[None, :] for img in imgs]
        if hasattr(_recognizer, 'forward_dummy'):
            _recognizer.forward = _recognizer.forward_dummy
        for one_img in img_list:
            _recognizer(one_img)
def test_tanet():
    """Smoke-test the TANet recognizer: training losses, inference and Grad-CAM."""
    config = get_recognizer_cfg(
        'tanet/tanet_r50_dense_1x1x8_100e_kinetics400_rgb.py')
    # Do not load pretrained backbone weights in a unit test
    config.model['backbone']['pretrained'] = None
    recognizer = build_recognizer(config.model)

    input_shape = (1, 8, 3, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape)

    imgs = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']

    # Training-mode forward pass must return a dict of losses
    losses = recognizer(imgs, gt_labels)
    assert isinstance(losses, dict)

    # Inference-mode forward pass (no gradients, one sample at a time)
    with torch.no_grad():
        img_list = [img[None, :] for img in imgs]
        for one_img in img_list:
            recognizer(one_img, None, return_loss=False)

    # Rebuild with a larger clip count and probability-averaged clip scores
    input_shape = (2, 48, 3, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape)
    imgs = demo_inputs['imgs']
    config.model.test_cfg = dict(average_clips='prob')
    recognizer = build_recognizer(config.model)

    # Inference-mode forward pass on the rebuilt model
    with torch.no_grad():
        img_list = [img[None, :] for img in imgs]
        for one_img in img_list:
            recognizer(one_img, None, return_loss=False)

    # Grad-CAM forward pass
    recognizer(imgs, gradcam=True)
    for one_img in img_list:
        recognizer(one_img, gradcam=True)
| true | true |
f73e7786569284959910b2e0ec7e9f936bc294f0 | 806 | py | Python | manage.py | elishaking/polynize | 2bb61c9682ce42ea22b3beded441ce6e2b59c56c | [
"MIT"
] | null | null | null | manage.py | elishaking/polynize | 2bb61c9682ce42ea22b3beded441ce6e2b59c56c | [
"MIT"
] | 2 | 2020-06-06T00:36:43.000Z | 2021-06-10T22:31:45.000Z | manage.py | elishaking/polynize | 2bb61c9682ce42ea22b3beded441ce6e2b59c56c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless already configured
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "polynize.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django itself imported fine, so re-raise the original error
        raise
    # Dispatch to the management command named on the command line
    execute_from_command_line(sys.argv)
| 35.043478 | 77 | 0.64268 |
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless already configured
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "polynize.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # Distinguish "Django is not installed" from other import failures:
        # only raise the friendly message when django itself cannot be
        # imported; otherwise re-raise the original error.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Dispatch to the management command named on the command line
    execute_from_command_line(sys.argv)
| true | true |
f73e795e144d5d39eb6d660bdd6f621515320341 | 5,754 | py | Python | Lib/site-packages/anyio/abc/_sockets.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | 3 | 2018-08-21T13:46:08.000Z | 2018-09-24T18:09:48.000Z | Lib/site-packages/anyio/abc/_sockets.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | 1 | 2018-10-01T01:38:04.000Z | 2018-10-01T01:38:04.000Z | Lib/site-packages/anyio/abc/_sockets.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | null | null | null | import socket
from abc import abstractmethod
from io import IOBase
from ipaddress import IPv4Address, IPv6Address
from socket import AddressFamily
from types import TracebackType
from typing import (
Any,
AsyncContextManager,
Callable,
Collection,
Dict,
List,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
from .._core._typedattr import (
TypedAttributeProvider,
TypedAttributeSet,
typed_attribute,
)
from ._streams import ByteStream, Listener, T_Stream, UnreliableObjectStream
from ._tasks import TaskGroup
# Shorthand type aliases used throughout the socket APIs
IPAddressType = Union[str, IPv4Address, IPv6Address]  # host as string or ipaddress object
IPSockAddrType = Tuple[str, int]  # (host, port)
SockAddrType = Union[IPSockAddrType, str]  # (host, port) pair or a string address
UDPPacketType = Tuple[bytes, IPSockAddrType]  # (payload, (host, port))
T_Retval = TypeVar("T_Retval")
class _NullAsyncContextManager:
async def __aenter__(self) -> None:
pass
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> Optional[bool]:
return None
class SocketAttribute(TypedAttributeSet):
    """Typed attributes that socket-backed objects expose via ``extra_attributes``."""

    #: the address family of the underlying socket
    family: AddressFamily = typed_attribute()
    #: the local socket address of the underlying socket
    local_address: SockAddrType = typed_attribute()
    #: for IP addresses, the local port the underlying socket is bound to
    local_port: int = typed_attribute()
    #: the underlying stdlib socket object
    raw_socket: socket.socket = typed_attribute()
    #: the remote address the underlying socket is connected to
    remote_address: SockAddrType = typed_attribute()
    #: for IP addresses, the remote port the underlying socket is connected to
    remote_port: int = typed_attribute()
class _SocketProvider(TypedAttributeProvider):
    """Mixin that derives :class:`SocketAttribute` values from ``_raw_socket``."""

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        from .._core._sockets import convert_ipv6_sockaddr as convert

        # Attributes every socket can provide
        attrs: Dict[Any, Callable[[], Any]] = {
            SocketAttribute.family: lambda: self._raw_socket.family,
            SocketAttribute.local_address: lambda: convert(
                self._raw_socket.getsockname()
            ),
            SocketAttribute.raw_socket: lambda: self._raw_socket,
        }

        # getpeername() raises OSError when the socket is not connected
        try:
            peername: Optional[Tuple[str, int]] = convert(
                self._raw_socket.getpeername()
            )
        except OSError:
            peername = None

        # Provide the remote address for connected sockets
        if peername is not None:
            attrs[SocketAttribute.remote_address] = lambda: peername

        # Provide local and remote ports for IP based sockets
        if self._raw_socket.family in (AddressFamily.AF_INET, AddressFamily.AF_INET6):
            attrs[SocketAttribute.local_port] = (
                lambda: self._raw_socket.getsockname()[1]
            )
            if peername is not None:
                remote_port = peername[1]
                attrs[SocketAttribute.remote_port] = lambda: remote_port

        return attrs

    @property
    @abstractmethod
    def _raw_socket(self) -> socket.socket:
        """The concrete stdlib socket this object wraps."""
class SocketStream(ByteStream, _SocketProvider):
    """
    Transports bytes over a socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """


class UNIXSocketStream(SocketStream):
    """A :class:`SocketStream` that can additionally pass file descriptors."""

    @abstractmethod
    async def send_fds(
        self, message: bytes, fds: Collection[Union[int, IOBase]]
    ) -> None:
        """
        Send file descriptors along with a message to the peer.

        :param message: a non-empty bytestring
        :param fds: a collection of files (either numeric file descriptors or open file or socket
            objects)
        """

    @abstractmethod
    async def receive_fds(self, msglen: int, maxfds: int) -> Tuple[bytes, List[int]]:
        """
        Receive file descriptors along with a message from the peer.

        :param msglen: length of the message to expect from the peer
        :param maxfds: maximum number of file descriptors to expect from the peer
        :return: a tuple of (message, file descriptors)
        """
class SocketListener(Listener[SocketStream], _SocketProvider):
    """
    Listens to incoming socket connections.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """

    @abstractmethod
    async def accept(self) -> SocketStream:
        """Accept an incoming connection."""

    async def serve(
        self, handler: Callable[[T_Stream], Any], task_group: Optional[TaskGroup] = None
    ) -> None:
        """
        Accept incoming connections in a loop, starting a task per connection.

        :param handler: callable invoked (as a task) with each accepted stream
        :param task_group: task group used to spawn the handler tasks; if
            omitted, an ad-hoc task group is created and owned by this call
        """
        from .. import create_task_group

        context_manager: AsyncContextManager
        if task_group is None:
            # No task group supplied: create one and manage its lifetime here
            task_group = context_manager = create_task_group()
        else:
            # Can be replaced with AsyncExitStack once on py3.7+
            context_manager = _NullAsyncContextManager()

        # Loop forever; cancellation of the surrounding scope ends serving
        async with context_manager:
            while True:
                stream = await self.accept()
                task_group.start_soon(handler, stream)
class UDPSocket(UnreliableObjectStream[UDPPacketType], _SocketProvider):
    """
    Represents an unconnected UDP socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """

    async def sendto(self, data: bytes, host: str, port: int) -> None:
        """Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, (host, port)))."""
        return await self.send((data, (host, port)))


class ConnectedUDPSocket(UnreliableObjectStream[bytes], _SocketProvider):
    """
    Represents a connected UDP socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """
| 31.271739 | 97 | 0.669621 | import socket
from abc import abstractmethod
from io import IOBase
from ipaddress import IPv4Address, IPv6Address
from socket import AddressFamily
from types import TracebackType
from typing import (
Any,
AsyncContextManager,
Callable,
Collection,
Dict,
List,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
from .._core._typedattr import (
TypedAttributeProvider,
TypedAttributeSet,
typed_attribute,
)
from ._streams import ByteStream, Listener, T_Stream, UnreliableObjectStream
from ._tasks import TaskGroup
# Shorthand type aliases used throughout the socket APIs
IPAddressType = Union[str, IPv4Address, IPv6Address]  # host as string or ipaddress object
IPSockAddrType = Tuple[str, int]  # (host, port)
SockAddrType = Union[IPSockAddrType, str]  # (host, port) pair or a string address
UDPPacketType = Tuple[bytes, IPSockAddrType]  # (payload, (host, port))
T_Retval = TypeVar("T_Retval")
class _NullAsyncContextManager:
    """A no-op async context manager: entry and exit do nothing."""

    async def __aenter__(self) -> None:
        pass

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> Optional[bool]:
        # None (falsy) lets exceptions propagate out of the `async with` block
        return None
class SocketAttribute(TypedAttributeSet):
    """Typed attributes that socket-backed objects expose via ``extra_attributes``."""

    #: the address family of the underlying socket
    family: AddressFamily = typed_attribute()
    #: the local socket address of the underlying socket
    local_address: SockAddrType = typed_attribute()
    #: for IP addresses, the local port the underlying socket is bound to
    local_port: int = typed_attribute()
    #: the underlying stdlib socket object
    raw_socket: socket.socket = typed_attribute()
    #: the remote address the underlying socket is connected to
    remote_address: SockAddrType = typed_attribute()
    #: for IP addresses, the remote port the underlying socket is connected to
    remote_port: int = typed_attribute()
class _SocketProvider(TypedAttributeProvider):
    """Mixin that derives :class:`SocketAttribute` values from ``_raw_socket``."""

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        from .._core._sockets import convert_ipv6_sockaddr as convert

        # Attributes every socket can provide
        attributes: Dict[Any, Callable[[], Any]] = {
            SocketAttribute.family: lambda: self._raw_socket.family,
            SocketAttribute.local_address: lambda: convert(
                self._raw_socket.getsockname()
            ),
            SocketAttribute.raw_socket: lambda: self._raw_socket,
        }
        # getpeername() raises OSError when the socket is not connected
        try:
            peername: Optional[Tuple[str, int]] = convert(
                self._raw_socket.getpeername()
            )
        except OSError:
            peername = None

        # Provide the remote address for connected sockets
        if peername is not None:
            attributes[SocketAttribute.remote_address] = lambda: peername

        # Provide local and remote ports for IP based sockets
        if self._raw_socket.family in (AddressFamily.AF_INET, AddressFamily.AF_INET6):
            attributes[
                SocketAttribute.local_port
            ] = lambda: self._raw_socket.getsockname()[1]
            if peername is not None:
                remote_port = peername[1]
                attributes[SocketAttribute.remote_port] = lambda: remote_port

        return attributes

    @property
    @abstractmethod
    def _raw_socket(self) -> socket.socket:
        # Concrete subclasses must expose the stdlib socket they wrap
        pass
class SocketStream(ByteStream, _SocketProvider):
    """Transports bytes over a socket."""


class UNIXSocketStream(SocketStream):
    """A :class:`SocketStream` that can additionally pass file descriptors."""

    @abstractmethod
    async def send_fds(
        self, message: bytes, fds: Collection[Union[int, IOBase]]
    ) -> None:
        """Send file descriptors along with a message to the peer."""

    @abstractmethod
    async def receive_fds(self, msglen: int, maxfds: int) -> Tuple[bytes, List[int]]:
        """Receive file descriptors along with a message from the peer."""
class SocketListener(Listener[SocketStream], _SocketProvider):
    """Listens to incoming socket connections."""

    @abstractmethod
    async def accept(self) -> SocketStream:
        """Accept an incoming connection."""

    async def serve(
        self, handler: Callable[[T_Stream], Any], task_group: Optional[TaskGroup] = None
    ) -> None:
        """Accept connections in a loop, starting one handler task per stream."""
        from .. import create_task_group

        context_manager: AsyncContextManager
        if task_group is None:
            # No task group supplied: create one and manage its lifetime here
            task_group = context_manager = create_task_group()
        else:
            # Caller owns the task group; enter a no-op context instead
            context_manager = _NullAsyncContextManager()

        # Loop forever; cancellation of the surrounding scope ends serving
        async with context_manager:
            while True:
                stream = await self.accept()
                task_group.start_soon(handler, stream)
class UDPSocket(UnreliableObjectStream[UDPPacketType], _SocketProvider):
    """Represents an unconnected UDP socket."""

    async def sendto(self, data: bytes, host: str, port: int) -> None:
        """Convenience alias for ``send((data, (host, port)))``."""
        return await self.send((data, (host, port)))


class ConnectedUDPSocket(UnreliableObjectStream[bytes], _SocketProvider):
    """Represents a connected UDP socket."""
| true | true |
f73e7af60c427337b8dd751eb65b2561e7bb53ac | 111 | py | Python | 12. Dread Door.py | Zfauser/Code-Combat-Introductory-To-Computer-Science-Python-Answers | 231d17ad2224fc616c022b515bc14e78ec5822f9 | [
"MIT"
] | 1 | 2021-02-25T16:43:08.000Z | 2021-02-25T16:43:08.000Z | 12. Dread Door.py | Zfauser/Code-Combat-Introductory-To-Computer-Science-Python-Answers | 231d17ad2224fc616c022b515bc14e78ec5822f9 | [
"MIT"
] | null | null | null | 12. Dread Door.py | Zfauser/Code-Combat-Introductory-To-Computer-Science-Python-Answers | 231d17ad2224fc616c022b515bc14e78ec5822f9 | [
"MIT"
] | null | null | null | # Attack the door!
# It will take many hits, so use a "while-true" loop.
while True:
hero.attack("Door")
| 15.857143 | 53 | 0.657658 |
while True:
hero.attack("Door")
| true | true |
f73e7b20481d743363e35c69175236420ddb689f | 73,825 | py | Python | test/test_utils.py | pento/yt-dlp | a46a815b055231d056aeb82aa3f37cd864322d73 | [
"Unlicense"
] | 1 | 2021-08-03T03:14:17.000Z | 2021-08-03T03:14:17.000Z | test/test_utils.py | pento/yt-dlp | a46a815b055231d056aeb82aa3f37cd864322d73 | [
"Unlicense"
] | null | null | null | test/test_utils.py | pento/yt-dlp | a46a815b055231d056aeb82aa3f37cd864322d73 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Various small unit tests
import io
import itertools
import json
import xml.etree.ElementTree
from yt_dlp.utils import (
age_restricted,
args_to_str,
encode_base_n,
caesar,
clean_html,
clean_podcast_url,
date_from_str,
datetime_from_str,
DateRange,
detect_exe_version,
determine_ext,
dict_get,
encode_compat_str,
encodeFilename,
escape_rfc3986,
escape_url,
extract_attributes,
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
float_or_none,
get_element_by_class,
get_element_by_attribute,
get_elements_by_class,
get_elements_by_attribute,
InAdvancePagedList,
int_or_none,
intlist_to_bytes,
is_html,
js_to_json,
limit_length,
merge_dicts,
mimetype2ext,
month_by_name,
multipart_encode,
ohdave_rsa_encrypt,
OnDemandPagedList,
orderedSet,
parse_age_limit,
parse_duration,
parse_filesize,
parse_count,
parse_iso8601,
parse_resolution,
parse_bitrate,
pkcs1pad,
read_batch_urls,
sanitize_filename,
sanitize_path,
sanitize_url,
sanitized_Request,
expand_path,
prepend_extension,
replace_extension,
remove_start,
remove_end,
remove_quotes,
rot47,
shell_quote,
smuggle_url,
str_to_int,
strip_jsonp,
strip_or_none,
subtitles_filename,
timeconvert,
unescapeHTML,
unified_strdate,
unified_timestamp,
unsmuggle_url,
uppercase_escape,
lowercase_escape,
url_basename,
url_or_none,
base_url,
urljoin,
urlencode_postdata,
urshift,
update_url_query,
version_tuple,
xpath_with_ns,
xpath_element,
xpath_text,
xpath_attr,
render_table,
match_str,
parse_dfxp_time_expr,
dfxp2srt,
cli_option,
cli_valueless_option,
cli_bool_option,
parse_codecs,
iri_to_uri,
LazyList,
)
from yt_dlp.compat import (
compat_chr,
compat_etree_fromstring,
compat_getenv,
compat_os_name,
compat_setenv,
compat_urlparse,
compat_parse_qs,
)
class TestUtil(unittest.TestCase):
    def test_timeconvert(self):
        # Empty or unparseable RFC 2822 date strings convert to None
        self.assertTrue(timeconvert('') is None)
        self.assertTrue(timeconvert('bougrg') is None)

    def test_sanitize_filename(self):
        # Default mode: replace filesystem-unsafe characters while keeping
        # the name readable
        self.assertEqual(sanitize_filename(''), '')
        self.assertEqual(sanitize_filename('abc'), 'abc')
        self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')
        self.assertEqual(sanitize_filename('123'), '123')

        self.assertEqual('abc_de', sanitize_filename('abc/de'))
        self.assertFalse('/' in sanitize_filename('abc/de///'))

        self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
        self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
        self.assertEqual('yes no', sanitize_filename('yes? no'))
        self.assertEqual('this - that', sanitize_filename('this: that'))

        # Non-ASCII characters are preserved in non-restricted mode
        self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
        aumlaut = 'ä'
        self.assertEqual(sanitize_filename(aumlaut), aumlaut)
        tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430'
        self.assertEqual(sanitize_filename(tests), tests)

        self.assertEqual(
            sanitize_filename('New World record at 0:12:34'),
            'New World record at 0_12_34')

        # is_id=True keeps leading dashes/dots intact (video IDs must survive)
        self.assertEqual(sanitize_filename('--gasdgf'), '_-gasdgf')
        self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf')
        self.assertEqual(sanitize_filename('.gasdgf'), 'gasdgf')
        self.assertEqual(sanitize_filename('.gasdgf', is_id=True), '.gasdgf')

        forbidden = '"\0\\/'
        for fc in forbidden:
            for fbc in forbidden:
                self.assertTrue(fbc not in sanitize_filename(fc))

    def test_sanitize_filename_restricted(self):
        # Restricted mode: output must additionally be ASCII and shell-safe
        self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
        self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')
        self.assertEqual(sanitize_filename('123', restricted=True), '123')

        self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
        self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True))

        self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
        self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
        self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
        self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))

        tests = 'aäb\u4e2d\u56fd\u7684c'
        self.assertEqual(sanitize_filename(tests, restricted=True), 'aab_c')
        self.assertTrue(sanitize_filename('\xf6', restricted=True) != '')  # No empty filename

        forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
        for fc in forbidden:
            for fbc in forbidden:
                self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))

        # Handle a common case more neatly
        self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song')
        self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech')
        # .. but make sure the file name is never empty
        self.assertTrue(sanitize_filename('-', restricted=True) != '')
        self.assertTrue(sanitize_filename(':', restricted=True) != '')

        # Accented Latin characters are transliterated to ASCII
        self.assertEqual(sanitize_filename(
            'ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', restricted=True),
            'AAAAAAAECEEEEIIIIDNOOOOOOOOEUUUUUYTHssaaaaaaaeceeeeiiiionooooooooeuuuuuythy')
def test_sanitize_ids(self):
self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw')
self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')
    def test_sanitize_path(self):
        # These expectations are Windows-specific; skip on other platforms
        if sys.platform != 'win32':
            return

        self.assertEqual(sanitize_path('abc'), 'abc')
        self.assertEqual(sanitize_path('abc/def'), 'abc\\def')
        self.assertEqual(sanitize_path('abc\\def'), 'abc\\def')
        self.assertEqual(sanitize_path('abc|def'), 'abc#def')
        self.assertEqual(sanitize_path('<>:"|?*'), '#######')
        self.assertEqual(sanitize_path('C:/abc/def'), 'C:\\abc\\def')
        self.assertEqual(sanitize_path('C?:/abc/def'), 'C##\\abc\\def')

        # Extended-length (\\?\) and UNC prefixes are preserved
        self.assertEqual(sanitize_path('\\\\?\\UNC\\ComputerName\\abc'), '\\\\?\\UNC\\ComputerName\\abc')
        self.assertEqual(sanitize_path('\\\\?\\UNC/ComputerName/abc'), '\\\\?\\UNC\\ComputerName\\abc')

        self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
        self.assertEqual(sanitize_path('\\\\?\\C:/abc'), '\\\\?\\C:\\abc')
        self.assertEqual(sanitize_path('\\\\?\\C:\\ab?c\\de:f'), '\\\\?\\C:\\ab#c\\de#f')
        self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')

        # Output-template placeholders survive sanitization
        self.assertEqual(
            sanitize_path('youtube/%(uploader)s/%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s'),
            'youtube\\%(uploader)s\\%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s')

        self.assertEqual(
            sanitize_path('youtube/TheWreckingYard ./00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part'),
            'youtube\\TheWreckingYard #\\00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part')
        # Trailing dots in path components become '#'
        self.assertEqual(sanitize_path('abc/def...'), 'abc\\def..#')
        self.assertEqual(sanitize_path('abc.../def'), 'abc..#\\def')
        self.assertEqual(sanitize_path('abc.../def...'), 'abc..#\\def..#')

        # Relative path segments are normalized
        self.assertEqual(sanitize_path('../abc'), '..\\abc')
        self.assertEqual(sanitize_path('../../abc'), '..\\..\\abc')
        self.assertEqual(sanitize_path('./abc'), 'abc')
        self.assertEqual(sanitize_path('./../abc'), '..\\abc')

    def test_sanitize_url(self):
        # Scheme repairs: protocol-relative, typoed and rtmp-style schemes
        self.assertEqual(sanitize_url('//foo.bar'), 'http://foo.bar')
        self.assertEqual(sanitize_url('httpss://foo.bar'), 'https://foo.bar')
        self.assertEqual(sanitize_url('rmtps://foo.bar'), 'rtmps://foo.bar')
        self.assertEqual(sanitize_url('https://foo.bar'), 'https://foo.bar')
        # Non-URL strings are returned as-is
        self.assertEqual(sanitize_url('foo bar'), 'foo bar')

    def test_extract_basic_auth(self):
        # Credentials embedded in the URL become a Basic Authorization header
        auth_header = lambda url: sanitized_Request(url).get_header('Authorization')
        self.assertFalse(auth_header('http://foo.bar'))
        self.assertFalse(auth_header('http://:foo.bar'))
        self.assertEqual(auth_header('http://@foo.bar'), 'Basic Og==')
        self.assertEqual(auth_header('http://:pass@foo.bar'), 'Basic OnBhc3M=')
        self.assertEqual(auth_header('http://user:@foo.bar'), 'Basic dXNlcjo=')
        self.assertEqual(auth_header('http://user:pass@foo.bar'), 'Basic dXNlcjpwYXNz')
    def test_expand_path(self):
        # Build a platform-appropriate environment variable reference
        def env(var):
            return '%{0}%'.format(var) if sys.platform == 'win32' else '${0}'.format(var)

        compat_setenv('yt_dlp_EXPATH_PATH', 'expanded')
        self.assertEqual(expand_path(env('yt_dlp_EXPATH_PATH')), 'expanded')
        self.assertEqual(expand_path(env('HOME')), compat_getenv('HOME'))
        # '~' expands to the home directory
        self.assertEqual(expand_path('~'), compat_getenv('HOME'))
        self.assertEqual(
            expand_path('~/%s' % env('yt_dlp_EXPATH_PATH')),
            '%s/expanded' % compat_getenv('HOME'))

    def test_prepend_extension(self):
        # The extra extension is inserted before the (expected) final extension
        self.assertEqual(prepend_extension('abc.ext', 'temp'), 'abc.temp.ext')
        self.assertEqual(prepend_extension('abc.ext', 'temp', 'ext'), 'abc.temp.ext')
        # An unexpected extension is kept and the new one appended instead
        self.assertEqual(prepend_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp')
        self.assertEqual(prepend_extension('abc', 'temp'), 'abc.temp')
        self.assertEqual(prepend_extension('.abc', 'temp'), '.abc.temp')
        self.assertEqual(prepend_extension('.abc.ext', 'temp'), '.abc.temp.ext')

    def test_replace_extension(self):
        # The (expected) final extension is swapped for the new one
        self.assertEqual(replace_extension('abc.ext', 'temp'), 'abc.temp')
        self.assertEqual(replace_extension('abc.ext', 'temp', 'ext'), 'abc.temp')
        # An unexpected extension is kept and the new one appended instead
        self.assertEqual(replace_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp')
        self.assertEqual(replace_extension('abc', 'temp'), 'abc.temp')
        self.assertEqual(replace_extension('.abc', 'temp'), '.abc.temp')
        self.assertEqual(replace_extension('.abc.ext', 'temp'), '.abc.temp')

    def test_subtitles_filename(self):
        # Subtitle filenames embed the language code before the subtitle ext
        self.assertEqual(subtitles_filename('abc.ext', 'en', 'vtt'), 'abc.en.vtt')
        self.assertEqual(subtitles_filename('abc.ext', 'en', 'vtt', 'ext'), 'abc.en.vtt')
        self.assertEqual(subtitles_filename('abc.unexpected_ext', 'en', 'vtt', 'ext'), 'abc.unexpected_ext.en.vtt')
    def test_remove_start(self):
        # None passes through; the prefix is stripped only when present
        self.assertEqual(remove_start(None, 'A - '), None)
        self.assertEqual(remove_start('A - B', 'A - '), 'B')
        self.assertEqual(remove_start('B - A', 'A - '), 'B - A')

    def test_remove_end(self):
        # None passes through; the suffix is stripped only when present
        self.assertEqual(remove_end(None, ' - B'), None)
        self.assertEqual(remove_end('A - B', ' - B'), 'A')
        self.assertEqual(remove_end('B - A', ' - B'), 'B - A')
def test_remove_quotes(self):
self.assertEqual(remove_quotes(None), None)
self.assertEqual(remove_quotes('"'), '"')
self.assertEqual(remove_quotes("'"), "'")
self.assertEqual(remove_quotes(';'), ';')
self.assertEqual(remove_quotes('";'), '";')
self.assertEqual(remove_quotes('""'), '')
self.assertEqual(remove_quotes('";"'), ';')
    def test_ordered_set(self):
        # Duplicates are dropped while first-seen order is preserved
        self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
        self.assertEqual(orderedSet([]), [])
        self.assertEqual(orderedSet([1]), [1])
        # keep the list ordered
        self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
def test_unescape_html(self):
self.assertEqual(unescapeHTML('%20;'), '%20;')
self.assertEqual(unescapeHTML('/'), '/')
self.assertEqual(unescapeHTML('/'), '/')
self.assertEqual(unescapeHTML('é'), 'é')
self.assertEqual(unescapeHTML('�'), '�')
self.assertEqual(unescapeHTML('&a"'), '&a"')
# HTML5 entities
self.assertEqual(unescapeHTML('.''), '.\'')
    def test_date_from_str(self):
        # Relative date keywords and unit arithmetic are equivalent
        self.assertEqual(date_from_str('yesterday'), date_from_str('now-1day'))
        self.assertEqual(date_from_str('now+7day'), date_from_str('now+1week'))
        self.assertEqual(date_from_str('now+14day'), date_from_str('now+2week'))
        self.assertEqual(date_from_str('20200229+365day'), date_from_str('20200229+1year'))
        self.assertEqual(date_from_str('20210131+28day'), date_from_str('20210131+1month'))

    def test_datetime_from_str(self):
        # precision='auto' must infer the same truncation as the explicit one
        self.assertEqual(datetime_from_str('yesterday', precision='day'), datetime_from_str('now-1day', precision='auto'))
        self.assertEqual(datetime_from_str('now+7day', precision='day'), datetime_from_str('now+1week', precision='auto'))
        self.assertEqual(datetime_from_str('now+14day', precision='day'), datetime_from_str('now+2week', precision='auto'))
        self.assertEqual(datetime_from_str('20200229+365day', precision='day'), datetime_from_str('20200229+1year', precision='auto'))
        self.assertEqual(datetime_from_str('20210131+28day', precision='day'), datetime_from_str('20210131+1month', precision='auto'))
        self.assertEqual(datetime_from_str('20210131+59day', precision='day'), datetime_from_str('20210131+2month', precision='auto'))
        self.assertEqual(datetime_from_str('now+1day', precision='hour'), datetime_from_str('now+24hours', precision='auto'))
        self.assertEqual(datetime_from_str('now+23hours', precision='hour'), datetime_from_str('now+23hours', precision='auto'))

    def test_daterange(self):
        # Membership tests against bounded, open-start and open-end ranges
        _20century = DateRange("19000101", "20000101")
        self.assertFalse("17890714" in _20century)
        _ac = DateRange("00010101")
        self.assertTrue("19690721" in _ac)
        _firstmilenium = DateRange(end="10000101")
        self.assertTrue("07110427" in _firstmilenium)
    def test_unified_dates(self):
        # unified_strdate() normalizes many date formats to YYYYMMDD
        self.assertEqual(unified_strdate('December 21, 2010'), '20101221')
        self.assertEqual(unified_strdate('8/7/2009'), '20090708')
        self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
        self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
        self.assertEqual(unified_strdate('1968 12 10'), '19681210')
        self.assertEqual(unified_strdate('1968-12-10'), '19681210')
        self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
        # day_first=False parses US-style month-first dates
        self.assertEqual(
            unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False),
            '20141126')
        self.assertEqual(
            unified_strdate('2/2/2015 6:47:40 PM', day_first=False),
            '20150202')
        self.assertEqual(unified_strdate('Feb 14th 2016 5:45PM'), '20160214')
        self.assertEqual(unified_strdate('25-09-2014'), '20140925')
        self.assertEqual(unified_strdate('27.02.2016 17:30'), '20160227')
        # Unparseable input yields None rather than raising
        self.assertEqual(unified_strdate('UNKNOWN DATE FORMAT'), None)
        self.assertEqual(unified_strdate('Feb 7, 2016 at 6:35 pm'), '20160207')
        # Ordinal suffixes (st/nd/rd/th) are handled
        self.assertEqual(unified_strdate('July 15th, 2013'), '20130715')
        self.assertEqual(unified_strdate('September 1st, 2013'), '20130901')
        self.assertEqual(unified_strdate('Sep 2nd, 2013'), '20130902')
        self.assertEqual(unified_strdate('November 3rd, 2019'), '20191103')
        self.assertEqual(unified_strdate('October 23rd, 2005'), '20051023')

    def test_unified_timestamps(self):
        # unified_timestamp() parses similar formats into Unix timestamps
        self.assertEqual(unified_timestamp('December 21, 2010'), 1292889600)
        self.assertEqual(unified_timestamp('8/7/2009'), 1247011200)
        self.assertEqual(unified_timestamp('Dec 14, 2012'), 1355443200)
        self.assertEqual(unified_timestamp('2012/10/11 01:56:38 +0000'), 1349920598)
        # Pre-epoch dates give negative timestamps
        self.assertEqual(unified_timestamp('1968 12 10'), -33436800)
        self.assertEqual(unified_timestamp('1968-12-10'), -33436800)
        self.assertEqual(unified_timestamp('28/01/2014 21:00:00 +0100'), 1390939200)
        self.assertEqual(
            unified_timestamp('11/26/2014 11:30:00 AM PST', day_first=False),
            1417001400)
        self.assertEqual(
            unified_timestamp('2/2/2015 6:47:40 PM', day_first=False),
            1422902860)
        self.assertEqual(unified_timestamp('Feb 14th 2016 5:45PM'), 1455471900)
        self.assertEqual(unified_timestamp('25-09-2014'), 1411603200)
        self.assertEqual(unified_timestamp('27.02.2016 17:30'), 1456594200)
        # Unparseable input yields None rather than raising
        self.assertEqual(unified_timestamp('UNKNOWN DATE FORMAT'), None)
        self.assertEqual(unified_timestamp('May 16, 2016 11:15 PM'), 1463440500)
        self.assertEqual(unified_timestamp('Feb 7, 2016 at 6:35 pm'), 1454870100)
        self.assertEqual(unified_timestamp('2017-03-30T17:52:41Q'), 1490896361)
        self.assertEqual(unified_timestamp('Sep 11, 2013 | 5:49 AM'), 1378878540)
        self.assertEqual(unified_timestamp('December 15, 2017 at 7:49 am'), 1513324140)
        self.assertEqual(unified_timestamp('2018-03-14T08:32:43.1493874+00:00'), 1521016363)
    def test_determine_ext(self):
        # The extension is taken from the URL path, ignoring query strings;
        # unknown or missing extensions fall back to the supplied default
        self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4')
        self.assertEqual(determine_ext('http://example.com/foo/bar/?download', None), None)
        self.assertEqual(determine_ext('http://example.com/foo/bar.nonext/?download', None), None)
        self.assertEqual(determine_ext('http://example.com/foo/bar/mp4?download', None), None)
        self.assertEqual(determine_ext('http://example.com/foo/bar.m3u8//?download'), 'm3u8')
        self.assertEqual(determine_ext('foobar', None), None)
    def test_find_xpath_attr(self):
        # find_xpath_attr() locates the first element carrying an attribute,
        # optionally with a specific value
        testxml = '''<root>
            <node/>
            <node x="a"/>
            <node x="a" y="c" />
            <node x="b" y="d" />
            <node x="" />
        </root>'''
        doc = compat_etree_fromstring(testxml)

        self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None)
        self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
        self.assertEqual(find_xpath_attr(doc, './/node', 'n'), None)
        self.assertEqual(find_xpath_attr(doc, './/node', 'n', 'v'), None)
        self.assertEqual(find_xpath_attr(doc, './/node', 'x'), doc[1])
        self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
        self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'b'), doc[3])
        self.assertEqual(find_xpath_attr(doc, './/node', 'y'), doc[2])
        self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
        self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'd'), doc[3])
        # An empty attribute value still counts as present
        self.assertEqual(find_xpath_attr(doc, './/node', 'x', ''), doc[4])

    def test_xpath_with_ns(self):
        # xpath_with_ns() expands 'prefix:tag' parts via the namespace map
        testxml = '''<root xmlns:media="http://example.com/">
            <media:song>
                <media:author>The Author</media:author>
                <url>http://server.com/download.mp3</url>
            </media:song>
        </root>'''
        doc = compat_etree_fromstring(testxml)
        find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
        self.assertTrue(find('media:song') is not None)
        self.assertEqual(find('media:song/media:author').text, 'The Author')
        self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
def test_xpath_element(self):
doc = xml.etree.ElementTree.Element('root')
div = xml.etree.ElementTree.SubElement(doc, 'div')
p = xml.etree.ElementTree.SubElement(div, 'p')
p.text = 'Foo'
self.assertEqual(xpath_element(doc, 'div/p'), p)
self.assertEqual(xpath_element(doc, ['div/p']), p)
self.assertEqual(xpath_element(doc, ['div/bar', 'div/p']), p)
self.assertEqual(xpath_element(doc, 'div/bar', default='default'), 'default')
self.assertEqual(xpath_element(doc, ['div/bar'], default='default'), 'default')
self.assertTrue(xpath_element(doc, 'div/bar') is None)
self.assertTrue(xpath_element(doc, ['div/bar']) is None)
self.assertTrue(xpath_element(doc, ['div/bar'], 'div/baz') is None)
self.assertRaises(ExtractorError, xpath_element, doc, 'div/bar', fatal=True)
self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar'], fatal=True)
self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar', 'div/baz'], fatal=True)
def test_xpath_text(self):
testxml = '''<root>
<div>
<p>Foo</p>
</div>
</root>'''
doc = compat_etree_fromstring(testxml)
self.assertEqual(xpath_text(doc, 'div/p'), 'Foo')
self.assertEqual(xpath_text(doc, 'div/bar', default='default'), 'default')
self.assertTrue(xpath_text(doc, 'div/bar') is None)
self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True)
def test_xpath_attr(self):
testxml = '''<root>
<div>
<p x="a">Foo</p>
</div>
</root>'''
doc = compat_etree_fromstring(testxml)
self.assertEqual(xpath_attr(doc, 'div/p', 'x'), 'a')
self.assertEqual(xpath_attr(doc, 'div/bar', 'x'), None)
self.assertEqual(xpath_attr(doc, 'div/p', 'y'), None)
self.assertEqual(xpath_attr(doc, 'div/bar', 'x', default='default'), 'default')
self.assertEqual(xpath_attr(doc, 'div/p', 'y', default='default'), 'default')
self.assertRaises(ExtractorError, xpath_attr, doc, 'div/bar', 'x', fatal=True)
self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True)
def test_smuggle_url(self):
data = {"ö": "ö", "abc": [3]}
url = 'https://foo.bar/baz?x=y#a'
smug_url = smuggle_url(url, data)
unsmug_url, unsmug_data = unsmuggle_url(smug_url)
self.assertEqual(url, unsmug_url)
self.assertEqual(data, unsmug_data)
res_url, res_data = unsmuggle_url(url)
self.assertEqual(res_url, url)
self.assertEqual(res_data, None)
smug_url = smuggle_url(url, {'a': 'b'})
smug_smug_url = smuggle_url(smug_url, {'c': 'd'})
res_url, res_data = unsmuggle_url(smug_smug_url)
self.assertEqual(res_url, url)
self.assertEqual(res_data, {'a': 'b', 'c': 'd'})
def test_shell_quote(self):
args = ['ffmpeg', '-i', encodeFilename('ñ€ß\'.mp4')]
self.assertEqual(
shell_quote(args),
"""ffmpeg -i 'ñ€ß'"'"'.mp4'""" if compat_os_name != 'nt' else '''ffmpeg -i "ñ€ß'.mp4"''')
def test_float_or_none(self):
self.assertEqual(float_or_none('42.42'), 42.42)
self.assertEqual(float_or_none('42'), 42.0)
self.assertEqual(float_or_none(''), None)
self.assertEqual(float_or_none(None), None)
self.assertEqual(float_or_none([]), None)
self.assertEqual(float_or_none(set()), None)
def test_int_or_none(self):
self.assertEqual(int_or_none('42'), 42)
self.assertEqual(int_or_none(''), None)
self.assertEqual(int_or_none(None), None)
self.assertEqual(int_or_none([]), None)
self.assertEqual(int_or_none(set()), None)
def test_str_to_int(self):
self.assertEqual(str_to_int('123,456'), 123456)
self.assertEqual(str_to_int('123.456'), 123456)
self.assertEqual(str_to_int(523), 523)
# Python 3 has no long
if sys.version_info < (3, 0):
eval('self.assertEqual(str_to_int(123456L), 123456)')
self.assertEqual(str_to_int('noninteger'), None)
self.assertEqual(str_to_int([]), None)
def test_url_basename(self):
self.assertEqual(url_basename('http://foo.de/'), '')
self.assertEqual(url_basename('http://foo.de/bar/baz'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz?x=y'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz#x=y'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz/'), 'baz')
self.assertEqual(
url_basename('http://media.w3.org/2010/05/sintel/trailer.mp4'),
'trailer.mp4')
def test_base_url(self):
self.assertEqual(base_url('http://foo.de/'), 'http://foo.de/')
self.assertEqual(base_url('http://foo.de/bar'), 'http://foo.de/')
self.assertEqual(base_url('http://foo.de/bar/'), 'http://foo.de/bar/')
self.assertEqual(base_url('http://foo.de/bar/baz'), 'http://foo.de/bar/')
self.assertEqual(base_url('http://foo.de/bar/baz?x=z/x/c'), 'http://foo.de/bar/')
def test_urljoin(self):
self.assertEqual(urljoin('http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin(b'http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin(b'http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('//foo.de/', '/a/b/c.txt'), '//foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de/', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de/', 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de/', '//foo.de/a/b/c.txt'), '//foo.de/a/b/c.txt')
self.assertEqual(urljoin(None, 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin(None, '//foo.de/a/b/c.txt'), '//foo.de/a/b/c.txt')
self.assertEqual(urljoin('', 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin(['foobar'], 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de/', None), None)
self.assertEqual(urljoin('http://foo.de/', ''), None)
self.assertEqual(urljoin('http://foo.de/', ['foobar']), None)
self.assertEqual(urljoin('http://foo.de/a/b/c.txt', '.././../d.txt'), 'http://foo.de/d.txt')
self.assertEqual(urljoin('http://foo.de/a/b/c.txt', 'rtmp://foo.de'), 'rtmp://foo.de')
self.assertEqual(urljoin(None, 'rtmp://foo.de'), 'rtmp://foo.de')
def test_url_or_none(self):
self.assertEqual(url_or_none(None), None)
self.assertEqual(url_or_none(''), None)
self.assertEqual(url_or_none('foo'), None)
self.assertEqual(url_or_none('http://foo.de'), 'http://foo.de')
self.assertEqual(url_or_none('https://foo.de'), 'https://foo.de')
self.assertEqual(url_or_none('http$://foo.de'), None)
self.assertEqual(url_or_none('http://foo.de'), 'http://foo.de')
self.assertEqual(url_or_none('//foo.de'), '//foo.de')
self.assertEqual(url_or_none('s3://foo.de'), None)
self.assertEqual(url_or_none('rtmpte://foo.de'), 'rtmpte://foo.de')
self.assertEqual(url_or_none('mms://foo.de'), 'mms://foo.de')
self.assertEqual(url_or_none('rtspu://foo.de'), 'rtspu://foo.de')
self.assertEqual(url_or_none('ftps://foo.de'), 'ftps://foo.de')
def test_parse_age_limit(self):
self.assertEqual(parse_age_limit(None), None)
self.assertEqual(parse_age_limit(False), None)
self.assertEqual(parse_age_limit('invalid'), None)
self.assertEqual(parse_age_limit(0), 0)
self.assertEqual(parse_age_limit(18), 18)
self.assertEqual(parse_age_limit(21), 21)
self.assertEqual(parse_age_limit(22), None)
self.assertEqual(parse_age_limit('18'), 18)
self.assertEqual(parse_age_limit('18+'), 18)
self.assertEqual(parse_age_limit('PG-13'), 13)
self.assertEqual(parse_age_limit('TV-14'), 14)
self.assertEqual(parse_age_limit('TV-MA'), 17)
self.assertEqual(parse_age_limit('TV14'), 14)
self.assertEqual(parse_age_limit('TV_G'), 0)
def test_parse_duration(self):
self.assertEqual(parse_duration(None), None)
self.assertEqual(parse_duration(False), None)
self.assertEqual(parse_duration('invalid'), None)
self.assertEqual(parse_duration('1'), 1)
self.assertEqual(parse_duration('1337:12'), 80232)
self.assertEqual(parse_duration('9:12:43'), 33163)
self.assertEqual(parse_duration('12:00'), 720)
self.assertEqual(parse_duration('00:01:01'), 61)
self.assertEqual(parse_duration('x:y'), None)
self.assertEqual(parse_duration('3h11m53s'), 11513)
self.assertEqual(parse_duration('3h 11m 53s'), 11513)
self.assertEqual(parse_duration('3 hours 11 minutes 53 seconds'), 11513)
self.assertEqual(parse_duration('3 hours 11 mins 53 secs'), 11513)
self.assertEqual(parse_duration('62m45s'), 3765)
self.assertEqual(parse_duration('6m59s'), 419)
self.assertEqual(parse_duration('49s'), 49)
self.assertEqual(parse_duration('0h0m0s'), 0)
self.assertEqual(parse_duration('0m0s'), 0)
self.assertEqual(parse_duration('0s'), 0)
self.assertEqual(parse_duration('01:02:03.05'), 3723.05)
self.assertEqual(parse_duration('T30M38S'), 1838)
self.assertEqual(parse_duration('5 s'), 5)
self.assertEqual(parse_duration('3 min'), 180)
self.assertEqual(parse_duration('2.5 hours'), 9000)
self.assertEqual(parse_duration('02:03:04'), 7384)
self.assertEqual(parse_duration('01:02:03:04'), 93784)
self.assertEqual(parse_duration('1 hour 3 minutes'), 3780)
self.assertEqual(parse_duration('87 Min.'), 5220)
self.assertEqual(parse_duration('PT1H0.040S'), 3600.04)
self.assertEqual(parse_duration('PT00H03M30SZ'), 210)
self.assertEqual(parse_duration('P0Y0M0DT0H4M20.880S'), 260.88)
def test_fix_xml_ampersands(self):
self.assertEqual(
fix_xml_ampersands('"&x=y&z=a'), '"&x=y&z=a')
self.assertEqual(
fix_xml_ampersands('"&x=y&wrong;&z=a'),
'"&x=y&wrong;&z=a')
self.assertEqual(
fix_xml_ampersands('&'><"'),
'&'><"')
self.assertEqual(
fix_xml_ampersands('Ӓ᪼'), 'Ӓ᪼')
self.assertEqual(fix_xml_ampersands('&#&#'), '&#&#')
def test_paged_list(self):
def testPL(size, pagesize, sliceargs, expected):
def get_page(pagenum):
firstid = pagenum * pagesize
upto = min(size, pagenum * pagesize + pagesize)
for i in range(firstid, upto):
yield i
pl = OnDemandPagedList(get_page, pagesize)
got = pl.getslice(*sliceargs)
self.assertEqual(got, expected)
iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
got = iapl.getslice(*sliceargs)
self.assertEqual(got, expected)
testPL(5, 2, (), [0, 1, 2, 3, 4])
testPL(5, 2, (1,), [1, 2, 3, 4])
testPL(5, 2, (2,), [2, 3, 4])
testPL(5, 2, (4,), [4])
testPL(5, 2, (0, 3), [0, 1, 2])
testPL(5, 2, (1, 4), [1, 2, 3])
testPL(5, 2, (2, 99), [2, 3, 4])
testPL(5, 2, (20, 99), [])
    def test_read_batch_urls(self):
        """read_batch_urls skips a BOM, surrounding whitespace, CR line ends
        and '#'/';' comment lines, returning the bare URLs in order."""
        f = io.StringIO('''\xef\xbb\xbf foo
            bar\r
            baz
            # More after this line\r
            ; or after this
            bam''')
        self.assertEqual(read_batch_urls(f), ['foo', 'bar', 'baz', 'bam'])
def test_urlencode_postdata(self):
data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'})
self.assertTrue(isinstance(data, bytes))
def test_update_url_query(self):
def query_dict(url):
return compat_parse_qs(compat_urlparse.urlparse(url).query)
self.assertEqual(query_dict(update_url_query(
'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})),
query_dict('http://example.com/path?quality=HD&format=mp4'))
self.assertEqual(query_dict(update_url_query(
'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})),
query_dict('http://example.com/path?system=LINUX&system=WINDOWS'))
self.assertEqual(query_dict(update_url_query(
'http://example.com/path', {'fields': 'id,formats,subtitles'})),
query_dict('http://example.com/path?fields=id,formats,subtitles'))
self.assertEqual(query_dict(update_url_query(
'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})),
query_dict('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
self.assertEqual(query_dict(update_url_query(
'http://example.com/path?manifest=f4m', {'manifest': []})),
query_dict('http://example.com/path'))
self.assertEqual(query_dict(update_url_query(
'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})),
query_dict('http://example.com/path?system=LINUX'))
self.assertEqual(query_dict(update_url_query(
'http://example.com/path', {'fields': b'id,formats,subtitles'})),
query_dict('http://example.com/path?fields=id,formats,subtitles'))
self.assertEqual(query_dict(update_url_query(
'http://example.com/path', {'width': 1080, 'height': 720})),
query_dict('http://example.com/path?width=1080&height=720'))
self.assertEqual(query_dict(update_url_query(
'http://example.com/path', {'bitrate': 5020.43})),
query_dict('http://example.com/path?bitrate=5020.43'))
self.assertEqual(query_dict(update_url_query(
'http://example.com/path', {'test': '第二行тест'})),
query_dict('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
    def test_multipart_encode(self):
        """multipart_encode serializes a dict into a multipart/form-data body;
        a boundary that occurs inside a field value must raise ValueError."""
        self.assertEqual(
            multipart_encode({b'field': b'value'}, boundary='AAAAAA')[0],
            b'--AAAAAA\r\nContent-Disposition: form-data; name="field"\r\n\r\nvalue\r\n--AAAAAA--\r\n')
        # Non-ASCII names/values are carried through as raw UTF-8 bytes.
        self.assertEqual(
            multipart_encode({'欄位'.encode('utf-8'): '值'.encode('utf-8')}, boundary='AAAAAA')[0],
            b'--AAAAAA\r\nContent-Disposition: form-data; name="\xe6\xac\x84\xe4\xbd\x8d"\r\n\r\n\xe5\x80\xbc\r\n--AAAAAA--\r\n')
        self.assertRaises(
            ValueError, multipart_encode, {b'field': b'value'}, boundary='value')
def test_dict_get(self):
FALSE_VALUES = {
'none': None,
'false': False,
'zero': 0,
'empty_string': '',
'empty_list': [],
}
d = FALSE_VALUES.copy()
d['a'] = 42
self.assertEqual(dict_get(d, 'a'), 42)
self.assertEqual(dict_get(d, 'b'), None)
self.assertEqual(dict_get(d, 'b', 42), 42)
self.assertEqual(dict_get(d, ('a', )), 42)
self.assertEqual(dict_get(d, ('b', 'a', )), 42)
self.assertEqual(dict_get(d, ('b', 'c', 'a', 'd', )), 42)
self.assertEqual(dict_get(d, ('b', 'c', )), None)
self.assertEqual(dict_get(d, ('b', 'c', ), 42), 42)
for key, false_value in FALSE_VALUES.items():
self.assertEqual(dict_get(d, ('b', 'c', key, )), None)
self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value)
def test_merge_dicts(self):
self.assertEqual(merge_dicts({'a': 1}, {'b': 2}), {'a': 1, 'b': 2})
self.assertEqual(merge_dicts({'a': 1}, {'a': 2}), {'a': 1})
self.assertEqual(merge_dicts({'a': 1}, {'a': None}), {'a': 1})
self.assertEqual(merge_dicts({'a': 1}, {'a': ''}), {'a': 1})
self.assertEqual(merge_dicts({'a': 1}, {}), {'a': 1})
self.assertEqual(merge_dicts({'a': None}, {'a': 1}), {'a': 1})
self.assertEqual(merge_dicts({'a': ''}, {'a': 1}), {'a': ''})
self.assertEqual(merge_dicts({'a': ''}, {'a': 'abc'}), {'a': 'abc'})
self.assertEqual(merge_dicts({'a': None}, {'a': ''}, {'a': 'abc'}), {'a': 'abc'})
def test_encode_compat_str(self):
self.assertEqual(encode_compat_str(b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82', 'utf-8'), 'тест')
self.assertEqual(encode_compat_str('тест', 'utf-8'), 'тест')
def test_parse_iso8601(self):
self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
self.assertEqual(parse_iso8601('2015-09-29T08:27:31.727'), 1443515251)
self.assertEqual(parse_iso8601('2015-09-29T08-27-31.727'), None)
def test_strip_jsonp(self):
stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
d = json.loads(stripped)
self.assertEqual(d, [{"id": "532cb", "x": 3}])
stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc')
d = json.loads(stripped)
self.assertEqual(d, {'STATUS': 'OK'})
stripped = strip_jsonp('ps.embedHandler({"status": "success"});')
d = json.loads(stripped)
self.assertEqual(d, {'status': 'success'})
stripped = strip_jsonp('window.cb && window.cb({"status": "success"});')
d = json.loads(stripped)
self.assertEqual(d, {'status': 'success'})
stripped = strip_jsonp('window.cb && cb({"status": "success"});')
d = json.loads(stripped)
self.assertEqual(d, {'status': 'success'})
stripped = strip_jsonp('({"status": "success"});')
d = json.loads(stripped)
self.assertEqual(d, {'status': 'success'})
def test_strip_or_none(self):
self.assertEqual(strip_or_none(' abc'), 'abc')
self.assertEqual(strip_or_none('abc '), 'abc')
self.assertEqual(strip_or_none(' abc '), 'abc')
self.assertEqual(strip_or_none('\tabc\t'), 'abc')
self.assertEqual(strip_or_none('\n\tabc\n\t'), 'abc')
self.assertEqual(strip_or_none('abc'), 'abc')
self.assertEqual(strip_or_none(''), '')
self.assertEqual(strip_or_none(None), None)
self.assertEqual(strip_or_none(42), None)
self.assertEqual(strip_or_none([]), None)
def test_uppercase_escape(self):
self.assertEqual(uppercase_escape('aä'), 'aä')
self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
def test_lowercase_escape(self):
self.assertEqual(lowercase_escape('aä'), 'aä')
self.assertEqual(lowercase_escape('\\u0026'), '&')
def test_limit_length(self):
self.assertEqual(limit_length(None, 12), None)
self.assertEqual(limit_length('foo', 12), 'foo')
self.assertTrue(
limit_length('foo bar baz asd', 12).startswith('foo bar'))
self.assertTrue('...' in limit_length('foo bar baz asd', 12))
def test_mimetype2ext(self):
self.assertEqual(mimetype2ext(None), None)
self.assertEqual(mimetype2ext('video/x-flv'), 'flv')
self.assertEqual(mimetype2ext('application/x-mpegURL'), 'm3u8')
self.assertEqual(mimetype2ext('text/vtt'), 'vtt')
self.assertEqual(mimetype2ext('text/vtt;charset=utf-8'), 'vtt')
self.assertEqual(mimetype2ext('text/html; charset=utf-8'), 'html')
self.assertEqual(mimetype2ext('audio/x-wav'), 'wav')
self.assertEqual(mimetype2ext('audio/x-wav;codec=pcm'), 'wav')
def test_month_by_name(self):
self.assertEqual(month_by_name(None), None)
self.assertEqual(month_by_name('December', 'en'), 12)
self.assertEqual(month_by_name('décembre', 'fr'), 12)
self.assertEqual(month_by_name('December'), 12)
self.assertEqual(month_by_name('décembre'), None)
self.assertEqual(month_by_name('Unknown', 'unknown'), None)
def test_parse_codecs(self):
self.assertEqual(parse_codecs(''), {})
self.assertEqual(parse_codecs('avc1.77.30, mp4a.40.2'), {
'vcodec': 'avc1.77.30',
'acodec': 'mp4a.40.2',
})
self.assertEqual(parse_codecs('mp4a.40.2'), {
'vcodec': 'none',
'acodec': 'mp4a.40.2',
})
self.assertEqual(parse_codecs('mp4a.40.5,avc1.42001e'), {
'vcodec': 'avc1.42001e',
'acodec': 'mp4a.40.5',
})
self.assertEqual(parse_codecs('avc3.640028'), {
'vcodec': 'avc3.640028',
'acodec': 'none',
})
self.assertEqual(parse_codecs(', h264,,newcodec,aac'), {
'vcodec': 'h264',
'acodec': 'aac',
})
self.assertEqual(parse_codecs('av01.0.05M.08'), {
'vcodec': 'av01.0.05M.08',
'acodec': 'none',
})
self.assertEqual(parse_codecs('theora, vorbis'), {
'vcodec': 'theora',
'acodec': 'vorbis',
})
self.assertEqual(parse_codecs('unknownvcodec, unknownacodec'), {
'vcodec': 'unknownvcodec',
'acodec': 'unknownacodec',
})
self.assertEqual(parse_codecs('unknown'), {})
def test_escape_rfc3986(self):
reserved = "!*'();:@&=+$,/?#[]"
unreserved = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~'
self.assertEqual(escape_rfc3986(reserved), reserved)
self.assertEqual(escape_rfc3986(unreserved), unreserved)
self.assertEqual(escape_rfc3986('тест'), '%D1%82%D0%B5%D1%81%D1%82')
self.assertEqual(escape_rfc3986('%D1%82%D0%B5%D1%81%D1%82'), '%D1%82%D0%B5%D1%81%D1%82')
self.assertEqual(escape_rfc3986('foo bar'), 'foo%20bar')
self.assertEqual(escape_rfc3986('foo%20bar'), 'foo%20bar')
def test_escape_url(self):
self.assertEqual(
escape_url('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4'),
'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'
)
self.assertEqual(
escape_url('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'
)
self.assertEqual(
escape_url('http://тест.рф/фрагмент'),
'http://xn--e1aybc.xn--p1ai/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
)
self.assertEqual(
escape_url('http://тест.рф/абв?абв=абв#абв'),
'http://xn--e1aybc.xn--p1ai/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
)
self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
def test_js_to_json_realworld(self):
inp = '''{
'clip':{'provider':'pseudo'}
}'''
self.assertEqual(js_to_json(inp), '''{
"clip":{"provider":"pseudo"}
}''')
json.loads(js_to_json(inp))
inp = '''{
'playlist':[{'controls':{'all':null}}]
}'''
self.assertEqual(js_to_json(inp), '''{
"playlist":[{"controls":{"all":null}}]
}''')
inp = '''"The CW\\'s \\'Crazy Ex-Girlfriend\\'"'''
self.assertEqual(js_to_json(inp), '''"The CW's 'Crazy Ex-Girlfriend'"''')
inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"'
json_code = js_to_json(inp)
self.assertEqual(json.loads(json_code), json.loads(inp))
inp = '''{
0:{src:'skipped', type: 'application/dash+xml'},
1:{src:'skipped', type: 'application/vnd.apple.mpegURL'},
}'''
self.assertEqual(js_to_json(inp), '''{
"0":{"src":"skipped", "type": "application/dash+xml"},
"1":{"src":"skipped", "type": "application/vnd.apple.mpegURL"}
}''')
inp = '''{"foo":101}'''
self.assertEqual(js_to_json(inp), '''{"foo":101}''')
inp = '''{"duration": "00:01:07"}'''
self.assertEqual(js_to_json(inp), '''{"duration": "00:01:07"}''')
inp = '''{segments: [{"offset":-3.885780586188048e-16,"duration":39.75000000000001}]}'''
self.assertEqual(js_to_json(inp), '''{"segments": [{"offset":-3.885780586188048e-16,"duration":39.75000000000001}]}''')
def test_js_to_json_edgecases(self):
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
on = js_to_json('{"abc": true}')
self.assertEqual(json.loads(on), {'abc': True})
# Ignore JavaScript code as well
on = js_to_json('''{
"x": 1,
y: "a",
z: some.code
}''')
d = json.loads(on)
self.assertEqual(d['x'], 1)
self.assertEqual(d['y'], 'a')
# Just drop ! prefix for now though this results in a wrong value
on = js_to_json('''{
a: !0,
b: !1,
c: !!0,
d: !!42.42,
e: !!![],
f: !"abc",
g: !"",
!42: 42
}''')
self.assertEqual(json.loads(on), {
'a': 0,
'b': 1,
'c': 0,
'd': 42.42,
'e': [],
'f': "abc",
'g': "",
'42': 42
})
on = js_to_json('["abc", "def",]')
self.assertEqual(json.loads(on), ['abc', 'def'])
on = js_to_json('[/*comment\n*/"abc"/*comment\n*/,/*comment\n*/"def",/*comment\n*/]')
self.assertEqual(json.loads(on), ['abc', 'def'])
on = js_to_json('[//comment\n"abc" //comment\n,//comment\n"def",//comment\n]')
self.assertEqual(json.loads(on), ['abc', 'def'])
on = js_to_json('{"abc": "def",}')
self.assertEqual(json.loads(on), {'abc': 'def'})
on = js_to_json('{/*comment\n*/"abc"/*comment\n*/:/*comment\n*/"def"/*comment\n*/,/*comment\n*/}')
self.assertEqual(json.loads(on), {'abc': 'def'})
on = js_to_json('{ 0: /* " \n */ ",]" , }')
self.assertEqual(json.loads(on), {'0': ',]'})
on = js_to_json('{ /*comment\n*/0/*comment\n*/: /* " \n */ ",]" , }')
self.assertEqual(json.loads(on), {'0': ',]'})
on = js_to_json('{ 0: // comment\n1 }')
self.assertEqual(json.loads(on), {'0': 1})
on = js_to_json(r'["<p>x<\/p>"]')
self.assertEqual(json.loads(on), ['<p>x</p>'])
on = js_to_json(r'["\xaa"]')
self.assertEqual(json.loads(on), ['\u00aa'])
on = js_to_json("['a\\\nb']")
self.assertEqual(json.loads(on), ['ab'])
on = js_to_json("/*comment\n*/[/*comment\n*/'a\\\nb'/*comment\n*/]/*comment\n*/")
self.assertEqual(json.loads(on), ['ab'])
on = js_to_json('{0xff:0xff}')
self.assertEqual(json.loads(on), {'255': 255})
on = js_to_json('{/*comment\n*/0xff/*comment\n*/:/*comment\n*/0xff/*comment\n*/}')
self.assertEqual(json.loads(on), {'255': 255})
on = js_to_json('{077:077}')
self.assertEqual(json.loads(on), {'63': 63})
on = js_to_json('{/*comment\n*/077/*comment\n*/:/*comment\n*/077/*comment\n*/}')
self.assertEqual(json.loads(on), {'63': 63})
on = js_to_json('{42:42}')
self.assertEqual(json.loads(on), {'42': 42})
on = js_to_json('{/*comment\n*/42/*comment\n*/:/*comment\n*/42/*comment\n*/}')
self.assertEqual(json.loads(on), {'42': 42})
on = js_to_json('{42:4.2e1}')
self.assertEqual(json.loads(on), {'42': 42.0})
on = js_to_json('{ "0x40": "0x40" }')
self.assertEqual(json.loads(on), {'0x40': '0x40'})
on = js_to_json('{ "040": "040" }')
self.assertEqual(json.loads(on), {'040': '040'})
on = js_to_json('[1,//{},\n2]')
self.assertEqual(json.loads(on), [1, 2])
def test_js_to_json_malformed(self):
self.assertEqual(js_to_json('42a1'), '42"a1"')
self.assertEqual(js_to_json('42a-1'), '42"a"-1')
def test_extract_attributes(self):
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
self.assertEqual(extract_attributes("<e x='y'>"), {'x': 'y'})
self.assertEqual(extract_attributes('<e x=y>'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x="a \'b\' c">'), {'x': "a 'b' c"})
self.assertEqual(extract_attributes('<e x=\'a "b" c\'>'), {'x': 'a "b" c'})
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x="&">'), {'x': '&'}) # XML
self.assertEqual(extract_attributes('<e x=""">'), {'x': '"'})
self.assertEqual(extract_attributes('<e x="£">'), {'x': '£'}) # HTML 3.2
self.assertEqual(extract_attributes('<e x="λ">'), {'x': 'λ'}) # HTML 4.0
self.assertEqual(extract_attributes('<e x="&foo">'), {'x': '&foo'})
self.assertEqual(extract_attributes('<e x="\'">'), {'x': "'"})
self.assertEqual(extract_attributes('<e x=\'"\'>'), {'x': '"'})
self.assertEqual(extract_attributes('<e x >'), {'x': None})
self.assertEqual(extract_attributes('<e x=y a>'), {'x': 'y', 'a': None})
self.assertEqual(extract_attributes('<e x= y>'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x=1 y=2 x=3>'), {'y': '2', 'x': '3'})
self.assertEqual(extract_attributes('<e \nx=\ny\n>'), {'x': 'y'})
self.assertEqual(extract_attributes('<e \nx=\n"y"\n>'), {'x': 'y'})
self.assertEqual(extract_attributes("<e \nx=\n'y'\n>"), {'x': 'y'})
self.assertEqual(extract_attributes('<e \nx="\ny\n">'), {'x': '\ny\n'})
self.assertEqual(extract_attributes('<e CAPS=x>'), {'caps': 'x'}) # Names lowercased
self.assertEqual(extract_attributes('<e x=1 X=2>'), {'x': '2'})
self.assertEqual(extract_attributes('<e X=1 x=2>'), {'x': '2'})
self.assertEqual(extract_attributes('<e _:funny-name1=1>'), {'_:funny-name1': '1'})
self.assertEqual(extract_attributes('<e x="Fáilte 世界 \U0001f600">'), {'x': 'Fáilte 世界 \U0001f600'})
self.assertEqual(extract_attributes('<e x="décomposé">'), {'x': 'décompose\u0301'})
# "Narrow" Python builds don't support unicode code points outside BMP.
try:
compat_chr(0x10000)
supports_outside_bmp = True
except ValueError:
supports_outside_bmp = False
if supports_outside_bmp:
self.assertEqual(extract_attributes('<e x="Smile 😀!">'), {'x': 'Smile \U0001f600!'})
# Malformed HTML should not break attributes extraction on older Python
self.assertEqual(extract_attributes('<mal"formed/>'), {})
def test_clean_html(self):
self.assertEqual(clean_html('a:\nb'), 'a: b')
self.assertEqual(clean_html('a:\n "b"'), 'a: "b"')
self.assertEqual(clean_html('a<br>\xa0b'), 'a\nb')
def test_intlist_to_bytes(self):
self.assertEqual(
intlist_to_bytes([0, 1, 127, 128, 255]),
b'\x00\x01\x7f\x80\xff')
def test_args_to_str(self):
self.assertEqual(
args_to_str(['foo', 'ba/r', '-baz', '2 be', '']),
'foo ba/r -baz \'2 be\' \'\'' if compat_os_name != 'nt' else 'foo ba/r -baz "2 be" ""'
)
def test_parse_filesize(self):
self.assertEqual(parse_filesize(None), None)
self.assertEqual(parse_filesize(''), None)
self.assertEqual(parse_filesize('91 B'), 91)
self.assertEqual(parse_filesize('foobar'), None)
self.assertEqual(parse_filesize('2 MiB'), 2097152)
self.assertEqual(parse_filesize('5 GB'), 5000000000)
self.assertEqual(parse_filesize('1.2Tb'), 1200000000000)
self.assertEqual(parse_filesize('1.2tb'), 1200000000000)
self.assertEqual(parse_filesize('1,24 KB'), 1240)
self.assertEqual(parse_filesize('1,24 kb'), 1240)
self.assertEqual(parse_filesize('8.5 megabytes'), 8500000)
def test_parse_count(self):
self.assertEqual(parse_count(None), None)
self.assertEqual(parse_count(''), None)
self.assertEqual(parse_count('0'), 0)
self.assertEqual(parse_count('1000'), 1000)
self.assertEqual(parse_count('1.000'), 1000)
self.assertEqual(parse_count('1.1k'), 1100)
self.assertEqual(parse_count('1.1kk'), 1100000)
self.assertEqual(parse_count('1.1kk '), 1100000)
self.assertEqual(parse_count('1.1kk views'), 1100000)
def test_parse_resolution(self):
self.assertEqual(parse_resolution(None), {})
self.assertEqual(parse_resolution(''), {})
self.assertEqual(parse_resolution('1920x1080'), {'width': 1920, 'height': 1080})
self.assertEqual(parse_resolution('1920×1080'), {'width': 1920, 'height': 1080})
self.assertEqual(parse_resolution('1920 x 1080'), {'width': 1920, 'height': 1080})
self.assertEqual(parse_resolution('720p'), {'height': 720})
self.assertEqual(parse_resolution('4k'), {'height': 2160})
self.assertEqual(parse_resolution('8K'), {'height': 4320})
def test_parse_bitrate(self):
self.assertEqual(parse_bitrate(None), None)
self.assertEqual(parse_bitrate(''), None)
self.assertEqual(parse_bitrate('300kbps'), 300)
self.assertEqual(parse_bitrate('1500kbps'), 1500)
self.assertEqual(parse_bitrate('300 kbps'), 300)
def test_version_tuple(self):
self.assertEqual(version_tuple('1'), (1,))
self.assertEqual(version_tuple('10.23.344'), (10, 23, 344))
self.assertEqual(version_tuple('10.1-6'), (10, 1, 6)) # avconv style
def test_detect_exe_version(self):
self.assertEqual(detect_exe_version('''ffmpeg version 1.2.1
built on May 27 2013 08:37:26 with gcc 4.7 (Debian 4.7.3-4)
configuration: --prefix=/usr --extra-'''), '1.2.1')
self.assertEqual(detect_exe_version('''ffmpeg version N-63176-g1fb4685
built on May 15 2014 22:09:06 with gcc 4.8.2 (GCC)'''), 'N-63176-g1fb4685')
self.assertEqual(detect_exe_version('''X server found. dri2 connection failed!
Trying to open render node...
Success at /dev/dri/renderD128.
ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
    def test_age_restricted(self):
        """age_restricted(content_limit, age_limit) is True only when the content's
        limit exceeds the configured age policy; None on either side disables it."""
        self.assertFalse(age_restricted(None, 10))  # unrestricted content
        self.assertFalse(age_restricted(1, None))  # unrestricted policy
        self.assertFalse(age_restricted(8, 10))
        self.assertTrue(age_restricted(18, 14))
        self.assertFalse(age_restricted(18, 18))  # boundary: exactly at the limit is allowed
def test_is_html(self):
self.assertFalse(is_html(b'\x49\x44\x43<html'))
self.assertTrue(is_html(b'<!DOCTYPE foo>\xaaa'))
self.assertTrue(is_html( # UTF-8 with BOM
b'\xef\xbb\xbf<!DOCTYPE foo>\xaaa'))
self.assertTrue(is_html( # UTF-16-LE
b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00'
))
self.assertTrue(is_html( # UTF-16-BE
b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4'
))
self.assertTrue(is_html( # UTF-32-BE
b'\x00\x00\xFE\xFF\x00\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4'))
self.assertTrue(is_html( # UTF-32-LE
b'\xFF\xFE\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4\x00\x00\x00'))
def test_render_table(self):
self.assertEqual(
render_table(
['a', 'bcd'],
[[123, 4], [9999, 51]]),
'a bcd\n'
'123 4\n'
'9999 51')
def test_match_str(self):
self.assertRaises(ValueError, match_str, 'xy>foobar', {})
self.assertFalse(match_str('xy', {'x': 1200}))
self.assertTrue(match_str('!xy', {'x': 1200}))
self.assertTrue(match_str('x', {'x': 1200}))
self.assertFalse(match_str('!x', {'x': 1200}))
self.assertTrue(match_str('x', {'x': 0}))
self.assertFalse(match_str('x>0', {'x': 0}))
self.assertFalse(match_str('x>0', {}))
self.assertTrue(match_str('x>?0', {}))
self.assertTrue(match_str('x>1K', {'x': 1200}))
self.assertFalse(match_str('x>2K', {'x': 1200}))
self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200}))
self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200}))
self.assertFalse(match_str('y=a212', {'y': 'foobar42'}))
self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'}))
self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'}))
self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'}))
self.assertFalse(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 90, 'description': 'foo'}))
self.assertTrue(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'description': 'foo'}))
self.assertFalse(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'dislike_count': 60, 'description': 'foo'}))
self.assertFalse(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'dislike_count': 10}))
self.assertTrue(match_str('is_live', {'is_live': True}))
self.assertFalse(match_str('is_live', {'is_live': False}))
self.assertFalse(match_str('is_live', {'is_live': None}))
self.assertFalse(match_str('is_live', {}))
self.assertFalse(match_str('!is_live', {'is_live': True}))
self.assertTrue(match_str('!is_live', {'is_live': False}))
self.assertTrue(match_str('!is_live', {'is_live': None}))
self.assertTrue(match_str('!is_live', {}))
self.assertTrue(match_str('title', {'title': 'abc'}))
self.assertTrue(match_str('title', {'title': ''}))
self.assertFalse(match_str('!title', {'title': 'abc'}))
self.assertFalse(match_str('!title', {'title': ''}))
def test_parse_dfxp_time_expr(self):
self.assertEqual(parse_dfxp_time_expr(None), None)
self.assertEqual(parse_dfxp_time_expr(''), None)
self.assertEqual(parse_dfxp_time_expr('0.1'), 0.1)
self.assertEqual(parse_dfxp_time_expr('0.1s'), 0.1)
self.assertEqual(parse_dfxp_time_expr('00:00:01'), 1.0)
self.assertEqual(parse_dfxp_time_expr('00:00:01.100'), 1.1)
self.assertEqual(parse_dfxp_time_expr('00:00:01:100'), 1.1)
def test_dfxp2srt(self):
dfxp_data = '''<?xml version="1.0" encoding="UTF-8"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The following line contains Chinese characters and special symbols</p>
<p begin="1" end="2">第二行<br/>♪♪</p>
<p begin="2" dur="1"><span>Third<br/>Line</span></p>
<p begin="3" end="-1">Lines with invalid timestamps are ignored</p>
<p begin="-1" end="-1">Ignore, two</p>
<p begin="3" dur="-1">Ignored, three</p>
</div>
</body>
</tt>'''.encode('utf-8')
srt_data = '''1
00:00:00,000 --> 00:00:01,000
The following line contains Chinese characters and special symbols
2
00:00:01,000 --> 00:00:02,000
第二行
♪♪
3
00:00:02,000 --> 00:00:03,000
Third
Line
'''
self.assertEqual(dfxp2srt(dfxp_data), srt_data)
dfxp_data_no_default_namespace = '''<?xml version="1.0" encoding="UTF-8"?>
<tt xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The first line</p>
</div>
</body>
</tt>'''.encode('utf-8')
srt_data = '''1
00:00:00,000 --> 00:00:01,000
The first line
'''
self.assertEqual(dfxp2srt(dfxp_data_no_default_namespace), srt_data)
dfxp_data_with_style = '''<?xml version="1.0" encoding="utf-8"?>
<tt xmlns="http://www.w3.org/2006/10/ttaf1" xmlns:ttp="http://www.w3.org/2006/10/ttaf1#parameter" ttp:timeBase="media" xmlns:tts="http://www.w3.org/2006/10/ttaf1#style" xml:lang="en" xmlns:ttm="http://www.w3.org/2006/10/ttaf1#metadata">
<head>
<styling>
<style id="s2" style="s0" tts:color="cyan" tts:fontWeight="bold" />
<style id="s1" style="s0" tts:color="yellow" tts:fontStyle="italic" />
<style id="s3" style="s0" tts:color="lime" tts:textDecoration="underline" />
<style id="s0" tts:backgroundColor="black" tts:fontStyle="normal" tts:fontSize="16" tts:fontFamily="sansSerif" tts:color="white" />
</styling>
</head>
<body tts:textAlign="center" style="s0">
<div>
<p begin="00:00:02.08" id="p0" end="00:00:05.84">default style<span tts:color="red">custom style</span></p>
<p style="s2" begin="00:00:02.08" id="p0" end="00:00:05.84"><span tts:color="lime">part 1<br /></span><span tts:color="cyan">part 2</span></p>
<p style="s3" begin="00:00:05.84" id="p1" end="00:00:09.56">line 3<br />part 3</p>
<p style="s1" tts:textDecoration="underline" begin="00:00:09.56" id="p2" end="00:00:12.36"><span style="s2" tts:color="lime">inner<br /> </span>style</p>
</div>
</body>
</tt>'''.encode('utf-8')
srt_data = '''1
00:00:02,080 --> 00:00:05,839
<font color="white" face="sansSerif" size="16">default style<font color="red">custom style</font></font>
2
00:00:02,080 --> 00:00:05,839
<b><font color="cyan" face="sansSerif" size="16"><font color="lime">part 1
</font>part 2</font></b>
3
00:00:05,839 --> 00:00:09,560
<u><font color="lime">line 3
part 3</font></u>
4
00:00:09,560 --> 00:00:12,359
<i><u><font color="yellow"><font color="lime">inner
</font>style</font></u></i>
'''
self.assertEqual(dfxp2srt(dfxp_data_with_style), srt_data)
dfxp_data_non_utf8 = '''<?xml version="1.0" encoding="UTF-16"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">Line 1</p>
<p begin="1" end="2">第二行</p>
</div>
</body>
</tt>'''.encode('utf-16')
srt_data = '''1
00:00:00,000 --> 00:00:01,000
Line 1
2
00:00:01,000 --> 00:00:02,000
第二行
'''
self.assertEqual(dfxp2srt(dfxp_data_non_utf8), srt_data)
def test_cli_option(self):
self.assertEqual(cli_option({'proxy': '127.0.0.1:3128'}, '--proxy', 'proxy'), ['--proxy', '127.0.0.1:3128'])
self.assertEqual(cli_option({'proxy': None}, '--proxy', 'proxy'), [])
self.assertEqual(cli_option({}, '--proxy', 'proxy'), [])
self.assertEqual(cli_option({'retries': 10}, '--retries', 'retries'), ['--retries', '10'])
    def test_cli_valueless_option(self):
        """cli_valueless_option emits a bare flag iff the param matches the expected value."""
        self.assertEqual(cli_valueless_option(
            {'downloader': 'external'}, '--external-downloader', 'downloader', 'external'), ['--external-downloader'])
        self.assertEqual(cli_valueless_option(
            {'downloader': 'internal'}, '--external-downloader', 'downloader', 'external'), [])
        # Default expected value is True
        self.assertEqual(cli_valueless_option(
            {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'), ['--no-check-certificate'])
        self.assertEqual(cli_valueless_option(
            {'nocheckcertificate': False}, '--no-check-certificate', 'nocheckcertificate'), [])
        # Expected value can be inverted (flag fires on False)
        self.assertEqual(cli_valueless_option(
            {'checkcertificate': True}, '--no-check-certificate', 'checkcertificate', False), [])
        self.assertEqual(cli_valueless_option(
            {'checkcertificate': False}, '--no-check-certificate', 'checkcertificate', False), ['--no-check-certificate'])
    def test_cli_bool_option(self):
        """cli_bool_option renders a boolean param as a flag plus true/false value (optionally '='-joined)."""
        self.assertEqual(
            cli_bool_option(
                {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'),
            ['--no-check-certificate', 'true'])
        self.assertEqual(
            cli_bool_option(
                {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate', separator='='),
            ['--no-check-certificate=true'])
        # Custom true/false value strings invert naturally with the param
        self.assertEqual(
            cli_bool_option(
                {'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
            ['--check-certificate', 'false'])
        self.assertEqual(
            cli_bool_option(
                {'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
            ['--check-certificate=false'])
        self.assertEqual(
            cli_bool_option(
                {'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
            ['--check-certificate', 'true'])
        self.assertEqual(
            cli_bool_option(
                {'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
            ['--check-certificate=true'])
        # Missing param produces no arguments at all
        self.assertEqual(
            cli_bool_option(
                {}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
            [])
    def test_ohdave_rsa_encrypt(self):
        """ohdave_rsa_encrypt encrypts bytes with public exponent e and modulus N, returning lowercase hex."""
        N = 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
        e = 65537
        self.assertEqual(
            ohdave_rsa_encrypt(b'aa111222', e, N),
            '726664bd9a23fd0c70f9f1b84aab5e3905ce1e45a584e9cbcf9bcc7510338fc1986d6c599ff990d923aa43c51c0d9013cd572e13bc58f4ae48f2ed8c0b0ba881')
    def test_pkcs1pad(self):
        """pkcs1pad applies PKCS#1 type-2 padding ([0, 2, random..., 0, data]) to a byte list."""
        data = [1, 2, 3]
        padded_data = pkcs1pad(data, 32)
        self.assertEqual(padded_data[:2], [0, 2])
        self.assertEqual(padded_data[28:], [0, 1, 2, 3])
        # Data that cannot fit in the target length must be rejected
        self.assertRaises(ValueError, pkcs1pad, data, 8)
    def test_encode_base_n(self):
        """encode_base_n renders an integer in base n, optionally using a custom digit table."""
        self.assertEqual(encode_base_n(0, 30), '0')
        self.assertEqual(encode_base_n(80, 30), '2k')
        custom_table = '9876543210ZYXWVUTSRQPONMLKJIHGFEDCBA'
        self.assertEqual(encode_base_n(0, 30, custom_table), '9')
        self.assertEqual(encode_base_n(80, 30, custom_table), '7P')
        # Bases exceeding the digit table length must be rejected
        self.assertRaises(ValueError, encode_base_n, 0, 70)
        self.assertRaises(ValueError, encode_base_n, 0, 60, custom_table)
def test_caesar(self):
self.assertEqual(caesar('ace', 'abcdef', 2), 'cea')
self.assertEqual(caesar('cea', 'abcdef', -2), 'ace')
self.assertEqual(caesar('ace', 'abcdef', -2), 'eac')
self.assertEqual(caesar('eac', 'abcdef', 2), 'ace')
self.assertEqual(caesar('ace', 'abcdef', 0), 'ace')
self.assertEqual(caesar('xyz', 'abcdef', 2), 'xyz')
self.assertEqual(caesar('abc', 'acegik', 2), 'ebg')
self.assertEqual(caesar('ebg', 'acegik', -2), 'abc')
def test_rot47(self):
self.assertEqual(rot47('yt-dlp'), r'JE\5=A')
self.assertEqual(rot47('YT-DLP'), r'*%\s{!')
    def test_urshift(self):
        """urshift performs a 32-bit unsigned (logical) right shift."""
        self.assertEqual(urshift(3, 1), 1)
        # Negative input is treated as its unsigned 32-bit representation
        self.assertEqual(urshift(-3, 1), 2147483646)
    def test_get_element_by_class(self):
        """get_element_by_class returns the content of the first element carrying the class, else None."""
        html = '''
            <span class="foo bar">nice</span>
        '''
        self.assertEqual(get_element_by_class('foo', html), 'nice')
        self.assertEqual(get_element_by_class('no-such-class', html), None)
    def test_get_element_by_attribute(self):
        """get_element_by_attribute matches the attribute value exactly (no class-list splitting)."""
        html = '''
            <span class="foo bar">nice</span>
        '''
        self.assertEqual(get_element_by_attribute('class', 'foo bar', html), 'nice')
        # Unlike get_element_by_class, a partial class list does not match
        self.assertEqual(get_element_by_attribute('class', 'foo', html), None)
        self.assertEqual(get_element_by_attribute('class', 'no-such-foo', html), None)
        # Valueless attributes after the match (itemscope) must not confuse the parser
        html = '''
            <div itemprop="author" itemscope>foo</div>
        '''
        self.assertEqual(get_element_by_attribute('itemprop', 'author', html), 'foo')
    def test_get_elements_by_class(self):
        """get_elements_by_class returns the contents of all matching elements (empty list if none)."""
        html = '''
            <span class="foo bar">nice</span><span class="foo bar">also nice</span>
        '''
        self.assertEqual(get_elements_by_class('foo', html), ['nice', 'also nice'])
        self.assertEqual(get_elements_by_class('no-such-class', html), [])
    def test_get_elements_by_attribute(self):
        """get_elements_by_attribute returns contents of all elements whose attribute matches exactly."""
        html = '''
            <span class="foo bar">nice</span><span class="foo bar">also nice</span>
        '''
        self.assertEqual(get_elements_by_attribute('class', 'foo bar', html), ['nice', 'also nice'])
        self.assertEqual(get_elements_by_attribute('class', 'foo', html), [])
        self.assertEqual(get_elements_by_attribute('class', 'no-such-foo', html), [])
    def test_iri_to_uri(self):
        """iri_to_uri percent-encodes non-ASCII path/query characters and punycodes hostnames."""
        self.assertEqual(
            iri_to_uri('https://www.google.com/search?q=foo&ie=utf-8&oe=utf-8&client=firefox-b'),
            'https://www.google.com/search?q=foo&ie=utf-8&oe=utf-8&client=firefox-b')  # Same
        self.assertEqual(
            iri_to_uri('https://www.google.com/search?q=Käsesoßenrührlöffel'),  # German for cheese sauce stirring spoon
            'https://www.google.com/search?q=K%C3%A4seso%C3%9Fenr%C3%BChrl%C3%B6ffel')
        # Already-encoded sequences and reserved characters are preserved
        self.assertEqual(
            iri_to_uri('https://www.google.com/search?q=lt<+gt>+eq%3D+amp%26+percent%25+hash%23+colon%3A+tilde~#trash=?&garbage=#'),
            'https://www.google.com/search?q=lt%3C+gt%3E+eq%3D+amp%26+percent%25+hash%23+colon%3A+tilde~#trash=?&garbage=#')
        # Non-ASCII hostnames become IDNA (punycode)
        self.assertEqual(
            iri_to_uri('http://правозащита38.рф/category/news/'),
            'http://xn--38-6kcaak9aj5chl4a3g.xn--p1ai/category/news/')
        self.assertEqual(
            iri_to_uri('http://www.правозащита38.рф/category/news/'),
            'http://www.xn--38-6kcaak9aj5chl4a3g.xn--p1ai/category/news/')
        self.assertEqual(
            iri_to_uri('https://i❤.ws/emojidomain/👍👏🤝💪'),
            'https://xn--i-7iq.ws/emojidomain/%F0%9F%91%8D%F0%9F%91%8F%F0%9F%A4%9D%F0%9F%92%AA')
        self.assertEqual(
            iri_to_uri('http://日本語.jp/'),
            'http://xn--wgv71a119e.jp/')
        self.assertEqual(
            iri_to_uri('http://导航.中国/'),
            'http://xn--fet810g.xn--fiqs8s/')
    def test_clean_podcast_url(self):
        """clean_podcast_url strips known tracking-redirect prefixes, yielding the direct media URL."""
        self.assertEqual(clean_podcast_url('https://www.podtrac.com/pts/redirect.mp3/chtbl.com/track/5899E/traffic.megaphone.fm/HSW7835899191.mp3'), 'https://traffic.megaphone.fm/HSW7835899191.mp3')
        self.assertEqual(clean_podcast_url('https://play.podtrac.com/npr-344098539/edge1.pod.npr.org/anon.npr-podcasts/podcast/npr/waitwait/2020/10/20201003_waitwait_wwdtmpodcast201003-015621a5-f035-4eca-a9a1-7c118d90bc3c.mp3'), 'https://edge1.pod.npr.org/anon.npr-podcasts/podcast/npr/waitwait/2020/10/20201003_waitwait_wwdtmpodcast201003-015621a5-f035-4eca-a9a1-7c118d90bc3c.mp3')
    def test_LazyList(self):
        """LazyList mirrors the underlying iterable for iteration, indexing, slicing, len/repr/str and reverse."""
        it = list(range(10))
        self.assertEqual(list(LazyList(it)), it)
        self.assertEqual(LazyList(it).exhaust(), it)
        self.assertEqual(LazyList(it)[5], it[5])
        # All slice forms must agree with list slicing
        self.assertEqual(LazyList(it)[5:], it[5:])
        self.assertEqual(LazyList(it)[:5], it[:5])
        self.assertEqual(LazyList(it)[::2], it[::2])
        self.assertEqual(LazyList(it)[1::2], it[1::2])
        self.assertEqual(LazyList(it)[5::-1], it[5::-1])
        self.assertEqual(LazyList(it)[6:2:-2], it[6:2:-2])
        self.assertEqual(LazyList(it)[::-1], it[::-1])
        # Truthiness follows emptiness of the source iterable
        self.assertTrue(LazyList(it))
        self.assertFalse(LazyList(range(0)))
        self.assertEqual(len(LazyList(it)), len(it))
        self.assertEqual(repr(LazyList(it)), repr(it))
        self.assertEqual(str(LazyList(it)), str(it))
        self.assertEqual(list(LazyList(it).reverse()), it[::-1])
        self.assertEqual(list(LazyList(it).reverse()[1:3:7]), it[::-1][1:3:7])
        self.assertEqual(list(LazyList(it).reverse()[::-1]), it)
    def test_LazyList_laziness(self):
        """LazyList consumes only as much of the iterable as the requested index requires."""
        def test(ll, idx, val, cache):
            # Check the indexed value AND exactly how far the source was consumed
            self.assertEqual(ll[idx], val)
            self.assertEqual(getattr(ll, '_LazyList__cache'), list(cache))
        ll = LazyList(range(10))
        test(ll, 0, 0, range(1))
        test(ll, 5, 5, range(6))
        # Negative indexing forces full exhaustion
        test(ll, -3, 7, range(10))
        ll = LazyList(range(10)).reverse()
        test(ll, -1, 0, range(1))
        test(ll, 3, 6, range(10))
        # Works on infinite iterators as long as only prefixes are needed
        ll = LazyList(itertools.count())
        test(ll, 10, 10, range(11))
        ll.reverse()
        test(ll, -15, 14, range(15))
# Allow running this test module directly: python test/test_utils.py
if __name__ == '__main__':
    unittest.main()
| 46.69513 | 382 | 0.606448 |
from __future__ import unicode_literals
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import io
import itertools
import json
import xml.etree.ElementTree
from yt_dlp.utils import (
age_restricted,
args_to_str,
encode_base_n,
caesar,
clean_html,
clean_podcast_url,
date_from_str,
datetime_from_str,
DateRange,
detect_exe_version,
determine_ext,
dict_get,
encode_compat_str,
encodeFilename,
escape_rfc3986,
escape_url,
extract_attributes,
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
float_or_none,
get_element_by_class,
get_element_by_attribute,
get_elements_by_class,
get_elements_by_attribute,
InAdvancePagedList,
int_or_none,
intlist_to_bytes,
is_html,
js_to_json,
limit_length,
merge_dicts,
mimetype2ext,
month_by_name,
multipart_encode,
ohdave_rsa_encrypt,
OnDemandPagedList,
orderedSet,
parse_age_limit,
parse_duration,
parse_filesize,
parse_count,
parse_iso8601,
parse_resolution,
parse_bitrate,
pkcs1pad,
read_batch_urls,
sanitize_filename,
sanitize_path,
sanitize_url,
sanitized_Request,
expand_path,
prepend_extension,
replace_extension,
remove_start,
remove_end,
remove_quotes,
rot47,
shell_quote,
smuggle_url,
str_to_int,
strip_jsonp,
strip_or_none,
subtitles_filename,
timeconvert,
unescapeHTML,
unified_strdate,
unified_timestamp,
unsmuggle_url,
uppercase_escape,
lowercase_escape,
url_basename,
url_or_none,
base_url,
urljoin,
urlencode_postdata,
urshift,
update_url_query,
version_tuple,
xpath_with_ns,
xpath_element,
xpath_text,
xpath_attr,
render_table,
match_str,
parse_dfxp_time_expr,
dfxp2srt,
cli_option,
cli_valueless_option,
cli_bool_option,
parse_codecs,
iri_to_uri,
LazyList,
)
from yt_dlp.compat import (
compat_chr,
compat_etree_fromstring,
compat_getenv,
compat_os_name,
compat_setenv,
compat_urlparse,
compat_parse_qs,
)
class TestUtil(unittest.TestCase):
def test_timeconvert(self):
self.assertTrue(timeconvert('') is None)
self.assertTrue(timeconvert('bougrg') is None)
    def test_sanitize_filename(self):
        """sanitize_filename replaces characters that are unsafe in file names (default mode)."""
        self.assertEqual(sanitize_filename(''), '')
        self.assertEqual(sanitize_filename('abc'), 'abc')
        self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')
        self.assertEqual(sanitize_filename('123'), '123')
        self.assertEqual('abc_de', sanitize_filename('abc/de'))
        self.assertFalse('/' in sanitize_filename('abc/de///'))
        self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
        self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
        self.assertEqual('yes no', sanitize_filename('yes? no'))
        self.assertEqual('this - that', sanitize_filename('this: that'))
        self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
        # Non-ASCII letters are preserved in the default (non-restricted) mode
        aumlaut = 'ä'
        self.assertEqual(sanitize_filename(aumlaut), aumlaut)
        tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430'
        self.assertEqual(sanitize_filename(tests), tests)
        self.assertEqual(
            sanitize_filename('New World record at 0:12:34'),
            'New World record at 0_12_34')
        # is_id=True keeps leading dashes/dots intact (IDs must round-trip)
        self.assertEqual(sanitize_filename('--gasdgf'), '_-gasdgf')
        self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf')
        self.assertEqual(sanitize_filename('.gasdgf'), 'gasdgf')
        self.assertEqual(sanitize_filename('.gasdgf', is_id=True), '.gasdgf')
        forbidden = '"\0\\/'
        for fc in forbidden:
            for fbc in forbidden:
                self.assertTrue(fbc not in sanitize_filename(fc))
def test_sanitize_filename_restricted(self):
self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')
self.assertEqual(sanitize_filename('123', restricted=True), '123')
self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True))
self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
tests = 'aäb\u4e2d\u56fd\u7684c'
self.assertEqual(sanitize_filename(tests, restricted=True), 'aab_c')
self.assertTrue(sanitize_filename('\xf6', restricted=True) != '') # No empty filename
forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,
for fc in forbidden:
for fbc in forbidden:
self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))
# Handle a common case more neatly
self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song')
self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech')
# .. but make sure the file name is never empty
self.assertTrue(sanitize_filename('-', restricted=True) != '')
self.assertTrue(sanitize_filename(':', restricted=True) != '')
self.assertEqual(sanitize_filename(
'ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', restricted=True),
'AAAAAAAECEEEEIIIIDNOOOOOOOOEUUUUUYTHssaaaaaaaeceeeeiiiionooooooooeuuuuuythy')
    def test_sanitize_ids(self):
        """Video IDs with leading underscores/dashes must survive sanitization unchanged."""
        self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw')
        self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
        self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')
def test_sanitize_path(self):
if sys.platform != 'win32':
return
self.assertEqual(sanitize_path('abc'), 'abc')
self.assertEqual(sanitize_path('abc/def'), 'abc\\def')
self.assertEqual(sanitize_path('abc\\def'), 'abc\\def')
self.assertEqual(sanitize_path('abc|def'), 'abc
self.assertEqual(sanitize_path('<>:"|?*'), '#######')
self.assertEqual(sanitize_path('C:/abc/def'), 'C:\\abc\\def')
self.assertEqual(sanitize_path('C?:/abc/def'), 'C##\\abc\\def')
self.assertEqual(sanitize_path('\\\\?\\UNC\\ComputerName\\abc'), '\\\\?\\UNC\\ComputerName\\abc')
self.assertEqual(sanitize_path('\\\\?\\UNC/ComputerName/abc'), '\\\\?\\UNC\\ComputerName\\abc')
self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
self.assertEqual(sanitize_path('\\\\?\\C:/abc'), '\\\\?\\C:\\abc')
self.assertEqual(sanitize_path('\\\\?\\C:\\ab?c\\de:f'), '\\\\?\\C:\\ab#c\\de#f')
self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
self.assertEqual(
sanitize_path('youtube/%(uploader)s/%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s'),
'youtube\\%(uploader)s\\%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s')
self.assertEqual(
sanitize_path('youtube/TheWreckingYard ./00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part'),
'youtube\\TheWreckingYard #\\00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part')
self.assertEqual(sanitize_path('abc/def...'), 'abc\\def..#')
self.assertEqual(sanitize_path('abc.../def'), 'abc..#\\def')
self.assertEqual(sanitize_path('abc.../def...'), 'abc..#\\def..#')
self.assertEqual(sanitize_path('../abc'), '..\\abc')
self.assertEqual(sanitize_path('../../abc'), '..\\..\\abc')
self.assertEqual(sanitize_path('./abc'), 'abc')
self.assertEqual(sanitize_path('./../abc'), '..\\abc')
    def test_sanitize_url(self):
        """sanitize_url repairs common scheme typos and protocol-relative URLs."""
        self.assertEqual(sanitize_url('//foo.bar'), 'http://foo.bar')
        self.assertEqual(sanitize_url('httpss://foo.bar'), 'https://foo.bar')
        self.assertEqual(sanitize_url('rmtps://foo.bar'), 'rtmps://foo.bar')
        self.assertEqual(sanitize_url('https://foo.bar'), 'https://foo.bar')
        # Non-URL strings pass through untouched
        self.assertEqual(sanitize_url('foo bar'), 'foo bar')
    def test_extract_basic_auth(self):
        """URL userinfo (user:pass@) must be converted to a Basic Authorization header."""
        auth_header = lambda url: sanitized_Request(url).get_header('Authorization')
        self.assertFalse(auth_header('http://foo.bar'))
        self.assertFalse(auth_header('http://:foo.bar'))
        # Empty credentials still produce a (base64 of ':') header
        self.assertEqual(auth_header('http://@foo.bar'), 'Basic Og==')
        self.assertEqual(auth_header('http://:pass@foo.bar'), 'Basic OnBhc3M=')
        self.assertEqual(auth_header('http://user:@foo.bar'), 'Basic dXNlcjo=')
        self.assertEqual(auth_header('http://user:pass@foo.bar'), 'Basic dXNlcjpwYXNz')
    def test_expand_path(self):
        """expand_path expands both '~' and environment-variable references."""
        def env(var):
            # Build the platform-native env-var reference syntax (%VAR% vs $VAR)
            return '%{0}%'.format(var) if sys.platform == 'win32' else '${0}'.format(var)
        compat_setenv('yt_dlp_EXPATH_PATH', 'expanded')
        self.assertEqual(expand_path(env('yt_dlp_EXPATH_PATH')), 'expanded')
        self.assertEqual(expand_path(env('HOME')), compat_getenv('HOME'))
        self.assertEqual(expand_path('~'), compat_getenv('HOME'))
        self.assertEqual(
            expand_path('~/%s' % env('yt_dlp_EXPATH_PATH')),
            '%s/expanded' % compat_getenv('HOME'))
    def test_prepend_extension(self):
        """prepend_extension inserts an extra extension before the (expected) final one."""
        self.assertEqual(prepend_extension('abc.ext', 'temp'), 'abc.temp.ext')
        self.assertEqual(prepend_extension('abc.ext', 'temp', 'ext'), 'abc.temp.ext')
        # When the actual extension differs from the expected one, append instead
        self.assertEqual(prepend_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp')
        self.assertEqual(prepend_extension('abc', 'temp'), 'abc.temp')
        self.assertEqual(prepend_extension('.abc', 'temp'), '.abc.temp')
        self.assertEqual(prepend_extension('.abc.ext', 'temp'), '.abc.temp.ext')
    def test_replace_extension(self):
        """replace_extension swaps the final extension for a new one."""
        self.assertEqual(replace_extension('abc.ext', 'temp'), 'abc.temp')
        self.assertEqual(replace_extension('abc.ext', 'temp', 'ext'), 'abc.temp')
        # Unexpected extensions are kept and the new one appended
        self.assertEqual(replace_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp')
        self.assertEqual(replace_extension('abc', 'temp'), 'abc.temp')
        self.assertEqual(replace_extension('.abc', 'temp'), '.abc.temp')
        self.assertEqual(replace_extension('.abc.ext', 'temp'), '.abc.temp')
    def test_subtitles_filename(self):
        """subtitles_filename inserts the language code before the subtitle extension."""
        self.assertEqual(subtitles_filename('abc.ext', 'en', 'vtt'), 'abc.en.vtt')
        self.assertEqual(subtitles_filename('abc.ext', 'en', 'vtt', 'ext'), 'abc.en.vtt')
        self.assertEqual(subtitles_filename('abc.unexpected_ext', 'en', 'vtt', 'ext'), 'abc.unexpected_ext.en.vtt')
def test_remove_start(self):
self.assertEqual(remove_start(None, 'A - '), None)
self.assertEqual(remove_start('A - B', 'A - '), 'B')
self.assertEqual(remove_start('B - A', 'A - '), 'B - A')
def test_remove_end(self):
self.assertEqual(remove_end(None, ' - B'), None)
self.assertEqual(remove_end('A - B', ' - B'), 'A')
self.assertEqual(remove_end('B - A', ' - B'), 'B - A')
def test_remove_quotes(self):
self.assertEqual(remove_quotes(None), None)
self.assertEqual(remove_quotes('"'), '"')
self.assertEqual(remove_quotes("'"), "'")
self.assertEqual(remove_quotes(';'), ';')
self.assertEqual(remove_quotes('";'), '";')
self.assertEqual(remove_quotes('""'), '')
self.assertEqual(remove_quotes('";"'), ';')
    def test_ordered_set(self):
        """orderedSet de-duplicates while preserving first-seen order."""
        self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
        self.assertEqual(orderedSet([]), [])
        self.assertEqual(orderedSet([1]), [1])
        # keep the list ordered
        self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
    def test_unescape_html(self):
        """unescapeHTML decodes decimal, hex and named HTML entities; malformed ones are left alone."""
        self.assertEqual(unescapeHTML('%20;'), '%20;')
        self.assertEqual(unescapeHTML('&#x2F;'), '/')
        self.assertEqual(unescapeHTML('&#47;'), '/')
        self.assertEqual(unescapeHTML('&eacute;'), 'é')
        self.assertEqual(unescapeHTML('&#2013266066;'), '&#2013266066;')
        self.assertEqual(unescapeHTML('&a&quot;'), '&a"')
        # HTML5 entities
        self.assertEqual(unescapeHTML('.&apos;'), '.\'')
    def test_date_from_str(self):
        """date_from_str resolves relative date expressions (now/today +- N units)."""
        self.assertEqual(date_from_str('yesterday'), date_from_str('now-1day'))
        self.assertEqual(date_from_str('now+7day'), date_from_str('now+1week'))
        self.assertEqual(date_from_str('now+14day'), date_from_str('now+2week'))
        # 'year'/'month' are fixed-length aliases (365/28 days), not calendar units
        self.assertEqual(date_from_str('20200229+365day'), date_from_str('20200229+1year'))
        self.assertEqual(date_from_str('20210131+28day'), date_from_str('20210131+1month'))
    def test_datetime_from_str(self):
        """datetime_from_str resolves relative datetimes; precision='auto' infers from the unit."""
        self.assertEqual(datetime_from_str('yesterday', precision='day'), datetime_from_str('now-1day', precision='auto'))
        self.assertEqual(datetime_from_str('now+7day', precision='day'), datetime_from_str('now+1week', precision='auto'))
        self.assertEqual(datetime_from_str('now+14day', precision='day'), datetime_from_str('now+2week', precision='auto'))
        # 'year'/'month' behave as fixed-length spans (365/28-ish days)
        self.assertEqual(datetime_from_str('20200229+365day', precision='day'), datetime_from_str('20200229+1year', precision='auto'))
        self.assertEqual(datetime_from_str('20210131+28day', precision='day'), datetime_from_str('20210131+1month', precision='auto'))
        self.assertEqual(datetime_from_str('20210131+59day', precision='day'), datetime_from_str('20210131+2month', precision='auto'))
        self.assertEqual(datetime_from_str('now+1day', precision='hour'), datetime_from_str('now+24hours', precision='auto'))
        self.assertEqual(datetime_from_str('now+23hours', precision='hour'), datetime_from_str('now+23hours', precision='auto'))
    def test_daterange(self):
        """DateRange supports membership tests with open start or end bounds."""
        _20century = DateRange("19000101", "20000101")
        self.assertFalse("17890714" in _20century)
        # Only a start bound: open-ended towards the future
        _ac = DateRange("00010101")
        self.assertTrue("19690721" in _ac)
        # Only an end bound: open-ended towards the past
        _firstmilenium = DateRange(end="10000101")
        self.assertTrue("07110427" in _firstmilenium)
    def test_unified_dates(self):
        """unified_strdate parses many date formats into YYYYMMDD; unknown formats give None."""
        self.assertEqual(unified_strdate('December 21, 2010'), '20101221')
        self.assertEqual(unified_strdate('8/7/2009'), '20090708')
        self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
        self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
        self.assertEqual(unified_strdate('1968 12 10'), '19681210')
        self.assertEqual(unified_strdate('1968-12-10'), '19681210')
        self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
        # day_first=False switches ambiguous slash dates to US (month-first) order
        self.assertEqual(
            unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False),
            '20141126')
        self.assertEqual(
            unified_strdate('2/2/2015 6:47:40 PM', day_first=False),
            '20150202')
        self.assertEqual(unified_strdate('Feb 14th 2016 5:45PM'), '20160214')
        self.assertEqual(unified_strdate('25-09-2014'), '20140925')
        self.assertEqual(unified_strdate('27.02.2016 17:30'), '20160227')
        self.assertEqual(unified_strdate('UNKNOWN DATE FORMAT'), None)
        self.assertEqual(unified_strdate('Feb 7, 2016 at 6:35 pm'), '20160207')
        # Ordinal suffixes (st/nd/rd/th) must be tolerated
        self.assertEqual(unified_strdate('July 15th, 2013'), '20130715')
        self.assertEqual(unified_strdate('September 1st, 2013'), '20130901')
        self.assertEqual(unified_strdate('Sep 2nd, 2013'), '20130902')
        self.assertEqual(unified_strdate('November 3rd, 2019'), '20191103')
        self.assertEqual(unified_strdate('October 23rd, 2005'), '20051023')
    def test_unified_timestamps(self):
        """unified_timestamp parses many date/time formats into a Unix timestamp; unknown gives None."""
        self.assertEqual(unified_timestamp('December 21, 2010'), 1292889600)
        self.assertEqual(unified_timestamp('8/7/2009'), 1247011200)
        self.assertEqual(unified_timestamp('Dec 14, 2012'), 1355443200)
        self.assertEqual(unified_timestamp('2012/10/11 01:56:38 +0000'), 1349920598)
        # Pre-epoch dates yield negative timestamps
        self.assertEqual(unified_timestamp('1968 12 10'), -33436800)
        self.assertEqual(unified_timestamp('1968-12-10'), -33436800)
        self.assertEqual(unified_timestamp('28/01/2014 21:00:00 +0100'), 1390939200)
        self.assertEqual(
            unified_timestamp('11/26/2014 11:30:00 AM PST', day_first=False),
            1417001400)
        self.assertEqual(
            unified_timestamp('2/2/2015 6:47:40 PM', day_first=False),
            1422902860)
        self.assertEqual(unified_timestamp('Feb 14th 2016 5:45PM'), 1455471900)
        self.assertEqual(unified_timestamp('25-09-2014'), 1411603200)
        self.assertEqual(unified_timestamp('27.02.2016 17:30'), 1456594200)
        self.assertEqual(unified_timestamp('UNKNOWN DATE FORMAT'), None)
        self.assertEqual(unified_timestamp('May 16, 2016 11:15 PM'), 1463440500)
        self.assertEqual(unified_timestamp('Feb 7, 2016 at 6:35 pm'), 1454870100)
        self.assertEqual(unified_timestamp('2017-03-30T17:52:41Q'), 1490896361)
        self.assertEqual(unified_timestamp('Sep 11, 2013 | 5:49 AM'), 1378878540)
        self.assertEqual(unified_timestamp('December 15, 2017 at 7:49 am'), 1513324140)
        # Fractional seconds are truncated
        self.assertEqual(unified_timestamp('2018-03-14T08:32:43.1493874+00:00'), 1521016363)
    def test_determine_ext(self):
        """determine_ext extracts a known file extension from a URL, else the given default."""
        self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4')
        self.assertEqual(determine_ext('http://example.com/foo/bar/?download', None), None)
        # Unknown extensions are not reported
        self.assertEqual(determine_ext('http://example.com/foo/bar.nonext/?download', None), None)
        self.assertEqual(determine_ext('http://example.com/foo/bar/mp4?download', None), None)
        self.assertEqual(determine_ext('http://example.com/foo/bar.m3u8//?download'), 'm3u8')
        self.assertEqual(determine_ext('foobar', None), None)
    def test_find_xpath_attr(self):
        """find_xpath_attr finds the first node carrying an attribute (optionally with a given value)."""
        testxml = '''<root>
            <node/>
            <node x="a"/>
            <node x="a" y="c" />
            <node x="b" y="d" />
            <node x="" />
        </root>'''
        doc = compat_etree_fromstring(testxml)
        self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None)
        self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
        self.assertEqual(find_xpath_attr(doc, './/node', 'n'), None)
        self.assertEqual(find_xpath_attr(doc, './/node', 'n', 'v'), None)
        self.assertEqual(find_xpath_attr(doc, './/node', 'x'), doc[1])
        self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
        self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'b'), doc[3])
        self.assertEqual(find_xpath_attr(doc, './/node', 'y'), doc[2])
        self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
        self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'd'), doc[3])
        # An empty attribute value still counts as the attribute being present
        self.assertEqual(find_xpath_attr(doc, './/node', 'x', ''), doc[4])
    def test_xpath_with_ns(self):
        """xpath_with_ns expands 'prefix:tag' steps into ElementTree's '{uri}tag' form."""
        testxml = '''<root xmlns:media="http://example.com/">
            <media:song>
                <media:author>The Author</media:author>
                <url>http://server.com/download.mp3</url>
            </media:song>
        </root>'''
        doc = compat_etree_fromstring(testxml)
        find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
        self.assertTrue(find('media:song') is not None)
        self.assertEqual(find('media:song/media:author').text, 'The Author')
        # Unprefixed steps are left untouched
        self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
    def test_xpath_element(self):
        """xpath_element looks up one of several xpaths, honoring default= and fatal=."""
        doc = xml.etree.ElementTree.Element('root')
        div = xml.etree.ElementTree.SubElement(doc, 'div')
        p = xml.etree.ElementTree.SubElement(div, 'p')
        p.text = 'Foo'
        self.assertEqual(xpath_element(doc, 'div/p'), p)
        # A list of xpaths returns the first match
        self.assertEqual(xpath_element(doc, ['div/p']), p)
        self.assertEqual(xpath_element(doc, ['div/bar', 'div/p']), p)
        self.assertEqual(xpath_element(doc, 'div/bar', default='default'), 'default')
        self.assertEqual(xpath_element(doc, ['div/bar'], default='default'), 'default')
        self.assertTrue(xpath_element(doc, 'div/bar') is None)
        self.assertTrue(xpath_element(doc, ['div/bar']) is None)
        self.assertTrue(xpath_element(doc, ['div/bar'], 'div/baz') is None)
        # fatal=True escalates a miss to ExtractorError
        self.assertRaises(ExtractorError, xpath_element, doc, 'div/bar', fatal=True)
        self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar'], fatal=True)
        self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar', 'div/baz'], fatal=True)
    def test_xpath_text(self):
        """xpath_text returns the matched element's text, honoring default= and fatal=."""
        testxml = '''<root>
            <div>
                <p>Foo</p>
            </div>
        </root>'''
        doc = compat_etree_fromstring(testxml)
        self.assertEqual(xpath_text(doc, 'div/p'), 'Foo')
        self.assertEqual(xpath_text(doc, 'div/bar', default='default'), 'default')
        self.assertTrue(xpath_text(doc, 'div/bar') is None)
        self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True)
    def test_xpath_attr(self):
        """xpath_attr returns an attribute of the matched element, honoring default= and fatal=."""
        testxml = '''<root>
            <div>
                <p x="a">Foo</p>
            </div>
        </root>'''
        doc = compat_etree_fromstring(testxml)
        self.assertEqual(xpath_attr(doc, 'div/p', 'x'), 'a')
        # Missing element or missing attribute both yield None (or the default)
        self.assertEqual(xpath_attr(doc, 'div/bar', 'x'), None)
        self.assertEqual(xpath_attr(doc, 'div/p', 'y'), None)
        self.assertEqual(xpath_attr(doc, 'div/bar', 'x', default='default'), 'default')
        self.assertEqual(xpath_attr(doc, 'div/p', 'y', default='default'), 'default')
        self.assertRaises(ExtractorError, xpath_attr, doc, 'div/bar', 'x', fatal=True)
        self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True)
    def test_smuggle_url(self):
        """smuggle_url/unsmuggle_url round-trip arbitrary JSON-able data through a URL fragment."""
        data = {"ö": "ö", "abc": [3]}
        url = 'https://foo.bar/baz?x=y#a'
        smug_url = smuggle_url(url, data)
        unsmug_url, unsmug_data = unsmuggle_url(smug_url)
        self.assertEqual(url, unsmug_url)
        self.assertEqual(data, unsmug_data)
        # Unsmuggling a plain URL yields the URL and None
        res_url, res_data = unsmuggle_url(url)
        self.assertEqual(res_url, url)
        self.assertEqual(res_data, None)
        # Smuggling twice merges the payloads
        smug_url = smuggle_url(url, {'a': 'b'})
        smug_smug_url = smuggle_url(smug_url, {'c': 'd'})
        res_url, res_data = unsmuggle_url(smug_smug_url)
        self.assertEqual(res_url, url)
        self.assertEqual(res_data, {'a': 'b', 'c': 'd'})
    def test_shell_quote(self):
        """shell_quote: POSIX-style single-quote escaping on non-Windows,
        double quotes on Windows (compat_os_name == 'nt')."""
        args = ['ffmpeg', '-i', encodeFilename('ñ€ß\'.mp4')]
        self.assertEqual(
            shell_quote(args),
            """ffmpeg -i 'ñ€ß'"'"'.mp4'""" if compat_os_name != 'nt' else '''ffmpeg -i "ñ€ß'.mp4"''')
def test_float_or_none(self):
self.assertEqual(float_or_none('42.42'), 42.42)
self.assertEqual(float_or_none('42'), 42.0)
self.assertEqual(float_or_none(''), None)
self.assertEqual(float_or_none(None), None)
self.assertEqual(float_or_none([]), None)
self.assertEqual(float_or_none(set()), None)
def test_int_or_none(self):
self.assertEqual(int_or_none('42'), 42)
self.assertEqual(int_or_none(''), None)
self.assertEqual(int_or_none(None), None)
self.assertEqual(int_or_none([]), None)
self.assertEqual(int_or_none(set()), None)
    def test_str_to_int(self):
        """str_to_int strips thousands separators (comma or dot) and passes
        through ints; non-numeric input yields None."""
        self.assertEqual(str_to_int('123,456'), 123456)
        self.assertEqual(str_to_int('123.456'), 123456)
        self.assertEqual(str_to_int(523), 523)
        # Python 3 has no long
        # (the literal is eval'ed so the file still parses on Python 3)
        if sys.version_info < (3, 0):
            eval('self.assertEqual(str_to_int(123456L), 123456)')
        self.assertEqual(str_to_int('noninteger'), None)
        self.assertEqual(str_to_int([]), None)
def test_url_basename(self):
self.assertEqual(url_basename('http://foo.de/'), '')
self.assertEqual(url_basename('http://foo.de/bar/baz'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz?x=y'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz
self.assertEqual(url_basename('http://foo.de/bar/baz/'), 'baz')
self.assertEqual(
url_basename('http://media.w3.org/2010/05/sintel/trailer.mp4'),
'trailer.mp4')
def test_base_url(self):
self.assertEqual(base_url('http://foo.de/'), 'http://foo.de/')
self.assertEqual(base_url('http://foo.de/bar'), 'http://foo.de/')
self.assertEqual(base_url('http://foo.de/bar/'), 'http://foo.de/bar/')
self.assertEqual(base_url('http://foo.de/bar/baz'), 'http://foo.de/bar/')
self.assertEqual(base_url('http://foo.de/bar/baz?x=z/x/c'), 'http://foo.de/bar/')
    def test_urljoin(self):
        """urljoin: resolve a possibly-relative path against a base URL,
        accepting bytes or str, and return None for unusable inputs."""
        self.assertEqual(urljoin('http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin(b'http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin('http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin(b'http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin('//foo.de/', '/a/b/c.txt'), '//foo.de/a/b/c.txt')
        self.assertEqual(urljoin('http://foo.de/', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin('http://foo.de', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin('http://foo.de', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        # An already-absolute path wins over the base.
        self.assertEqual(urljoin('http://foo.de/', 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin('http://foo.de/', '//foo.de/a/b/c.txt'), '//foo.de/a/b/c.txt')
        self.assertEqual(urljoin(None, 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin(None, '//foo.de/a/b/c.txt'), '//foo.de/a/b/c.txt')
        self.assertEqual(urljoin('', 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        self.assertEqual(urljoin(['foobar'], 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
        # Missing or non-string path yields None.
        self.assertEqual(urljoin('http://foo.de/', None), None)
        self.assertEqual(urljoin('http://foo.de/', ''), None)
        self.assertEqual(urljoin('http://foo.de/', ['foobar']), None)
        self.assertEqual(urljoin('http://foo.de/a/b/c.txt', '.././../d.txt'), 'http://foo.de/d.txt')
        # Different scheme in the path replaces the base entirely.
        self.assertEqual(urljoin('http://foo.de/a/b/c.txt', 'rtmp://foo.de'), 'rtmp://foo.de')
        self.assertEqual(urljoin(None, 'rtmp://foo.de'), 'rtmp://foo.de')
def test_url_or_none(self):
self.assertEqual(url_or_none(None), None)
self.assertEqual(url_or_none(''), None)
self.assertEqual(url_or_none('foo'), None)
self.assertEqual(url_or_none('http://foo.de'), 'http://foo.de')
self.assertEqual(url_or_none('https://foo.de'), 'https://foo.de')
self.assertEqual(url_or_none('http$://foo.de'), None)
self.assertEqual(url_or_none('http://foo.de'), 'http://foo.de')
self.assertEqual(url_or_none('//foo.de'), '//foo.de')
self.assertEqual(url_or_none('s3://foo.de'), None)
self.assertEqual(url_or_none('rtmpte://foo.de'), 'rtmpte://foo.de')
self.assertEqual(url_or_none('mms://foo.de'), 'mms://foo.de')
self.assertEqual(url_or_none('rtspu://foo.de'), 'rtspu://foo.de')
self.assertEqual(url_or_none('ftps://foo.de'), 'ftps://foo.de')
def test_parse_age_limit(self):
self.assertEqual(parse_age_limit(None), None)
self.assertEqual(parse_age_limit(False), None)
self.assertEqual(parse_age_limit('invalid'), None)
self.assertEqual(parse_age_limit(0), 0)
self.assertEqual(parse_age_limit(18), 18)
self.assertEqual(parse_age_limit(21), 21)
self.assertEqual(parse_age_limit(22), None)
self.assertEqual(parse_age_limit('18'), 18)
self.assertEqual(parse_age_limit('18+'), 18)
self.assertEqual(parse_age_limit('PG-13'), 13)
self.assertEqual(parse_age_limit('TV-14'), 14)
self.assertEqual(parse_age_limit('TV-MA'), 17)
self.assertEqual(parse_age_limit('TV14'), 14)
self.assertEqual(parse_age_limit('TV_G'), 0)
    def test_parse_duration(self):
        """parse_duration handles clock notation (H:M:S), compact (3h11m53s),
        verbose English, and ISO 8601 duration strings; returns seconds."""
        self.assertEqual(parse_duration(None), None)
        self.assertEqual(parse_duration(False), None)
        self.assertEqual(parse_duration('invalid'), None)
        self.assertEqual(parse_duration('1'), 1)
        self.assertEqual(parse_duration('1337:12'), 80232)
        self.assertEqual(parse_duration('9:12:43'), 33163)
        self.assertEqual(parse_duration('12:00'), 720)
        self.assertEqual(parse_duration('00:01:01'), 61)
        self.assertEqual(parse_duration('x:y'), None)
        self.assertEqual(parse_duration('3h11m53s'), 11513)
        self.assertEqual(parse_duration('3h 11m 53s'), 11513)
        self.assertEqual(parse_duration('3 hours 11 minutes 53 seconds'), 11513)
        self.assertEqual(parse_duration('3 hours 11 mins 53 secs'), 11513)
        self.assertEqual(parse_duration('62m45s'), 3765)
        self.assertEqual(parse_duration('6m59s'), 419)
        self.assertEqual(parse_duration('49s'), 49)
        self.assertEqual(parse_duration('0h0m0s'), 0)
        self.assertEqual(parse_duration('0m0s'), 0)
        self.assertEqual(parse_duration('0s'), 0)
        self.assertEqual(parse_duration('01:02:03.05'), 3723.05)
        self.assertEqual(parse_duration('T30M38S'), 1838)
        self.assertEqual(parse_duration('5 s'), 5)
        self.assertEqual(parse_duration('3 min'), 180)
        self.assertEqual(parse_duration('2.5 hours'), 9000)
        self.assertEqual(parse_duration('02:03:04'), 7384)
        self.assertEqual(parse_duration('01:02:03:04'), 93784)  # days:h:m:s
        self.assertEqual(parse_duration('1 hour 3 minutes'), 3780)
        self.assertEqual(parse_duration('87 Min.'), 5220)
        self.assertEqual(parse_duration('PT1H0.040S'), 3600.04)
        self.assertEqual(parse_duration('PT00H03M30SZ'), 210)
        self.assertEqual(parse_duration('P0Y0M0DT0H4M20.880S'), 260.88)
def test_fix_xml_ampersands(self):
self.assertEqual(
fix_xml_ampersands('"&x=y&z=a'), '"&x=y&z=a')
self.assertEqual(
fix_xml_ampersands('"&x=y&wrong;&z=a'),
'"&x=y&wrong;&z=a')
self.assertEqual(
fix_xml_ampersands('&'><"'),
'&'><"')
self.assertEqual(
fix_xml_ampersands('& def testPL(size, pagesize, sliceargs, expected):
def get_page(pagenum):
firstid = pagenum * pagesize
upto = min(size, pagenum * pagesize + pagesize)
for i in range(firstid, upto):
yield i
pl = OnDemandPagedList(get_page, pagesize)
got = pl.getslice(*sliceargs)
self.assertEqual(got, expected)
iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
got = iapl.getslice(*sliceargs)
self.assertEqual(got, expected)
testPL(5, 2, (), [0, 1, 2, 3, 4])
testPL(5, 2, (1,), [1, 2, 3, 4])
testPL(5, 2, (2,), [2, 3, 4])
testPL(5, 2, (4,), [4])
testPL(5, 2, (0, 3), [0, 1, 2])
testPL(5, 2, (1, 4), [1, 2, 3])
testPL(5, 2, (2, 99), [2, 3, 4])
testPL(5, 2, (20, 99), [])
    def test_read_batch_urls(self):
        """read_batch_urls skips a UTF-8 BOM, surrounding whitespace,
        CR line endings, and '#'/';'-prefixed comment lines."""
        f = io.StringIO('''\xef\xbb\xbf foo
            bar\r
            baz
            # More after this line\r
            ; or after this
            bam''')
        self.assertEqual(read_batch_urls(f), ['foo', 'bar', 'baz', 'bam'])
def test_urlencode_postdata(self):
data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'})
self.assertTrue(isinstance(data, bytes))
    def test_update_url_query(self):
        """update_url_query merges/overrides query parameters on a URL;
        comparison is done on parsed query dicts so ordering is irrelevant."""
        def query_dict(url):
            # Parse the query string into {key: [values]} for order-insensitive compare.
            return compat_parse_qs(compat_urlparse.urlparse(url).query)
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})),
            query_dict('http://example.com/path?quality=HD&format=mp4'))
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})),
            query_dict('http://example.com/path?system=LINUX&system=WINDOWS'))
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path', {'fields': 'id,formats,subtitles'})),
            query_dict('http://example.com/path?fields=id,formats,subtitles'))
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})),
            query_dict('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
        # An empty list removes the parameter entirely.
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path?manifest=f4m', {'manifest': []})),
            query_dict('http://example.com/path'))
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})),
            query_dict('http://example.com/path?system=LINUX'))
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path', {'fields': b'id,formats,subtitles'})),
            query_dict('http://example.com/path?fields=id,formats,subtitles'))
        # Non-string values (int, float) are stringified.
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path', {'width': 1080, 'height': 720})),
            query_dict('http://example.com/path?width=1080&height=720'))
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path', {'bitrate': 5020.43})),
            query_dict('http://example.com/path?bitrate=5020.43'))
        self.assertEqual(query_dict(update_url_query(
            'http://example.com/path', {'test': '第二行тест'})),
            query_dict('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
    def test_multipart_encode(self):
        """multipart_encode builds a multipart/form-data body with the given
        boundary; a boundary occurring inside the data raises ValueError."""
        self.assertEqual(
            multipart_encode({b'field': b'value'}, boundary='AAAAAA')[0],
            b'--AAAAAA\r\nContent-Disposition: form-data; name="field"\r\n\r\nvalue\r\n--AAAAAA--\r\n')
        self.assertEqual(
            multipart_encode({'欄位'.encode('utf-8'): '值'.encode('utf-8')}, boundary='AAAAAA')[0],
            b'--AAAAAA\r\nContent-Disposition: form-data; name="\xe6\xac\x84\xe4\xbd\x8d"\r\n\r\n\xe5\x80\xbc\r\n--AAAAAA--\r\n')
        self.assertRaises(
            ValueError, multipart_encode, {b'field': b'value'}, boundary='value')
    def test_dict_get(self):
        """dict_get: first truthy value among the given key(s); falsy values
        are skipped unless skip_false_values=False."""
        FALSE_VALUES = {
            'none': None,
            'false': False,
            'zero': 0,
            'empty_string': '',
            'empty_list': [],
        }
        d = FALSE_VALUES.copy()
        d['a'] = 42
        self.assertEqual(dict_get(d, 'a'), 42)
        self.assertEqual(dict_get(d, 'b'), None)
        self.assertEqual(dict_get(d, 'b', 42), 42)
        self.assertEqual(dict_get(d, ('a', )), 42)
        self.assertEqual(dict_get(d, ('b', 'a', )), 42)
        self.assertEqual(dict_get(d, ('b', 'c', 'a', 'd', )), 42)
        self.assertEqual(dict_get(d, ('b', 'c', )), None)
        self.assertEqual(dict_get(d, ('b', 'c', ), 42), 42)
        # Falsy-but-present values: skipped by default, returned when asked.
        for key, false_value in FALSE_VALUES.items():
            self.assertEqual(dict_get(d, ('b', 'c', key, )), None)
            self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value)
def test_merge_dicts(self):
self.assertEqual(merge_dicts({'a': 1}, {'b': 2}), {'a': 1, 'b': 2})
self.assertEqual(merge_dicts({'a': 1}, {'a': 2}), {'a': 1})
self.assertEqual(merge_dicts({'a': 1}, {'a': None}), {'a': 1})
self.assertEqual(merge_dicts({'a': 1}, {'a': ''}), {'a': 1})
self.assertEqual(merge_dicts({'a': 1}, {}), {'a': 1})
self.assertEqual(merge_dicts({'a': None}, {'a': 1}), {'a': 1})
self.assertEqual(merge_dicts({'a': ''}, {'a': 1}), {'a': ''})
self.assertEqual(merge_dicts({'a': ''}, {'a': 'abc'}), {'a': 'abc'})
self.assertEqual(merge_dicts({'a': None}, {'a': ''}, {'a': 'abc'}), {'a': 'abc'})
    def test_encode_compat_str(self):
        """encode_compat_str decodes bytes with the given encoding and
        passes text through unchanged."""
        self.assertEqual(encode_compat_str(b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82', 'utf-8'), 'тест')
        self.assertEqual(encode_compat_str('тест', 'utf-8'), 'тест')
def test_parse_iso8601(self):
self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
self.assertEqual(parse_iso8601('2015-09-29T08:27:31.727'), 1443515251)
self.assertEqual(parse_iso8601('2015-09-29T08-27-31.727'), None)
def test_strip_jsonp(self):
stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
d = json.loads(stripped)
self.assertEqual(d, [{"id": "532cb", "x": 3}])
stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc')
d = json.loads(stripped)
self.assertEqual(d, {'STATUS': 'OK'})
stripped = strip_jsonp('ps.embedHandler({"status": "success"});')
d = json.loads(stripped)
self.assertEqual(d, {'status': 'success'})
stripped = strip_jsonp('window.cb && window.cb({"status": "success"});')
d = json.loads(stripped)
self.assertEqual(d, {'status': 'success'})
stripped = strip_jsonp('window.cb && cb({"status": "success"});')
d = json.loads(stripped)
self.assertEqual(d, {'status': 'success'})
stripped = strip_jsonp('({"status": "success"});')
d = json.loads(stripped)
self.assertEqual(d, {'status': 'success'})
def test_strip_or_none(self):
self.assertEqual(strip_or_none(' abc'), 'abc')
self.assertEqual(strip_or_none('abc '), 'abc')
self.assertEqual(strip_or_none(' abc '), 'abc')
self.assertEqual(strip_or_none('\tabc\t'), 'abc')
self.assertEqual(strip_or_none('\n\tabc\n\t'), 'abc')
self.assertEqual(strip_or_none('abc'), 'abc')
self.assertEqual(strip_or_none(''), '')
self.assertEqual(strip_or_none(None), None)
self.assertEqual(strip_or_none(42), None)
self.assertEqual(strip_or_none([]), None)
    def test_uppercase_escape(self):
        """uppercase_escape expands \\U-style escapes, leaving other text alone."""
        self.assertEqual(uppercase_escape('aä'), 'aä')
        self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
    def test_lowercase_escape(self):
        """lowercase_escape expands \\u-style escapes, leaving other text alone."""
        self.assertEqual(lowercase_escape('aä'), 'aä')
        self.assertEqual(lowercase_escape('\\u0026'), '&')
    def test_limit_length(self):
        """limit_length passes through None/short strings and truncates
        longer ones with a '...' marker."""
        self.assertEqual(limit_length(None, 12), None)
        self.assertEqual(limit_length('foo', 12), 'foo')
        self.assertTrue(
            limit_length('foo bar baz asd', 12).startswith('foo bar'))
        self.assertTrue('...' in limit_length('foo bar baz asd', 12))
def test_mimetype2ext(self):
self.assertEqual(mimetype2ext(None), None)
self.assertEqual(mimetype2ext('video/x-flv'), 'flv')
self.assertEqual(mimetype2ext('application/x-mpegURL'), 'm3u8')
self.assertEqual(mimetype2ext('text/vtt'), 'vtt')
self.assertEqual(mimetype2ext('text/vtt;charset=utf-8'), 'vtt')
self.assertEqual(mimetype2ext('text/html; charset=utf-8'), 'html')
self.assertEqual(mimetype2ext('audio/x-wav'), 'wav')
self.assertEqual(mimetype2ext('audio/x-wav;codec=pcm'), 'wav')
def test_month_by_name(self):
self.assertEqual(month_by_name(None), None)
self.assertEqual(month_by_name('December', 'en'), 12)
self.assertEqual(month_by_name('décembre', 'fr'), 12)
self.assertEqual(month_by_name('December'), 12)
self.assertEqual(month_by_name('décembre'), None)
self.assertEqual(month_by_name('Unknown', 'unknown'), None)
    def test_parse_codecs(self):
        """parse_codecs splits an RFC 6381 codecs string into vcodec/acodec,
        marking the missing one as 'none'; unparsable input yields {}."""
        self.assertEqual(parse_codecs(''), {})
        self.assertEqual(parse_codecs('avc1.77.30, mp4a.40.2'), {
            'vcodec': 'avc1.77.30',
            'acodec': 'mp4a.40.2',
        })
        self.assertEqual(parse_codecs('mp4a.40.2'), {
            'vcodec': 'none',
            'acodec': 'mp4a.40.2',
        })
        self.assertEqual(parse_codecs('mp4a.40.5,avc1.42001e'), {
            'vcodec': 'avc1.42001e',
            'acodec': 'mp4a.40.5',
        })
        self.assertEqual(parse_codecs('avc3.640028'), {
            'vcodec': 'avc3.640028',
            'acodec': 'none',
        })
        self.assertEqual(parse_codecs(', h264,,newcodec,aac'), {
            'vcodec': 'h264',
            'acodec': 'aac',
        })
        self.assertEqual(parse_codecs('av01.0.05M.08'), {
            'vcodec': 'av01.0.05M.08',
            'acodec': 'none',
        })
        self.assertEqual(parse_codecs('theora, vorbis'), {
            'vcodec': 'theora',
            'acodec': 'vorbis',
        })
        # Exactly two unrecognized codecs: assumed to be video + audio.
        self.assertEqual(parse_codecs('unknownvcodec, unknownacodec'), {
            'vcodec': 'unknownvcodec',
            'acodec': 'unknownacodec',
        })
        self.assertEqual(parse_codecs('unknown'), {})
    def test_escape_rfc3986(self):
        """escape_rfc3986 percent-encodes only characters outside the
        RFC 3986 reserved/unreserved sets; existing %XX sequences survive."""
        reserved = "!*'();:@&=+$,/?#[]"
        unreserved = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~'
        self.assertEqual(escape_rfc3986(reserved), reserved)
        self.assertEqual(escape_rfc3986(unreserved), unreserved)
        self.assertEqual(escape_rfc3986('тест'), '%D1%82%D0%B5%D1%81%D1%82')
        self.assertEqual(escape_rfc3986('%D1%82%D0%B5%D1%81%D1%82'), '%D1%82%D0%B5%D1%81%D1%82')
        self.assertEqual(escape_rfc3986('foo bar'), 'foo%20bar')
        self.assertEqual(escape_rfc3986('foo%20bar'), 'foo%20bar')
    def test_escape_url(self):
        """escape_url percent-encodes non-ASCII path/query/fragment parts and
        IDNA-encodes non-ASCII hostnames; already-safe URLs pass through."""
        self.assertEqual(
            escape_url('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4'),
            'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'
        )
        self.assertEqual(
            escape_url('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
            'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'
        )
        self.assertEqual(
            escape_url('http://тест.рф/фрагмент'),
            'http://xn--e1aybc.xn--p1ai/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
        )
        self.assertEqual(
            escape_url('http://тест.рф/абв?абв=абв#абв'),
            'http://xn--e1aybc.xn--p1ai/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
        )
        self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
    def test_js_to_json_realworld(self):
        """js_to_json on snippets taken from real sites: single quotes,
        unquoted/numeric keys, trailing commas, escaped quotes."""
        inp = '''{
            'clip':{'provider':'pseudo'}
        }'''
        self.assertEqual(js_to_json(inp), '''{
            "clip":{"provider":"pseudo"}
        }''')
        json.loads(js_to_json(inp))
        inp = '''{
            'playlist':[{'controls':{'all':null}}]
        }'''
        self.assertEqual(js_to_json(inp), '''{
            "playlist":[{"controls":{"all":null}}]
        }''')
        inp = '''"The CW\\'s \\'Crazy Ex-Girlfriend\\'"'''
        self.assertEqual(js_to_json(inp), '''"The CW's 'Crazy Ex-Girlfriend'"''')
        inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"'
        json_code = js_to_json(inp)
        self.assertEqual(json.loads(json_code), json.loads(inp))
        inp = '''{
            0:{src:'skipped', type: 'application/dash+xml'},
            1:{src:'skipped', type: 'application/vnd.apple.mpegURL'},
        }'''
        self.assertEqual(js_to_json(inp), '''{
            "0":{"src":"skipped", "type": "application/dash+xml"},
            "1":{"src":"skipped", "type": "application/vnd.apple.mpegURL"}
        }''')
        inp = '''{"foo":101}'''
        self.assertEqual(js_to_json(inp), '''{"foo":101}''')
        inp = '''{"duration": "00:01:07"}'''
        self.assertEqual(js_to_json(inp), '''{"duration": "00:01:07"}''')
        inp = '''{segments: [{"offset":-3.885780586188048e-16,"duration":39.75000000000001}]}'''
        self.assertEqual(js_to_json(inp), '''{"segments": [{"offset":-3.885780586188048e-16,"duration":39.75000000000001}]}''')
    def test_js_to_json_edgecases(self):
        """js_to_json edge cases: escapes, '!'-coerced values, comments,
        trailing commas, hex/octal number keys, line continuations."""
        on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
        self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
        on = js_to_json('{"abc": true}')
        self.assertEqual(json.loads(on), {'abc': True})
        # Unquoted keys and bare identifiers as values.
        on = js_to_json('''{
            "x": 1,
            y: "a",
            z: some.code
        }''')
        d = json.loads(on)
        self.assertEqual(d['x'], 1)
        self.assertEqual(d['y'], 'a')
        # JavaScript '!'-prefixed value coercions are preserved as operands.
        on = js_to_json('''{
            a: !0,
            b: !1,
            c: !!0,
            d: !!42.42,
            e: !!![],
            f: !"abc",
            g: !"",
            !42: 42
        }''')
        self.assertEqual(json.loads(on), {
            'a': 0,
            'b': 1,
            'c': 0,
            'd': 42.42,
            'e': [],
            'f': "abc",
            'g': "",
            '42': 42
        })
        # Trailing commas and /* */ or // comments are stripped.
        on = js_to_json('["abc", "def",]')
        self.assertEqual(json.loads(on), ['abc', 'def'])
        on = js_to_json('[/*comment\n*/"abc"/*comment\n*/,/*comment\n*/"def",/*comment\n*/]')
        self.assertEqual(json.loads(on), ['abc', 'def'])
        on = js_to_json('[//comment\n"abc" //comment\n,//comment\n"def",//comment\n]')
        self.assertEqual(json.loads(on), ['abc', 'def'])
        on = js_to_json('{"abc": "def",}')
        self.assertEqual(json.loads(on), {'abc': 'def'})
        on = js_to_json('{/*comment\n*/"abc"/*comment\n*/:/*comment\n*/"def"/*comment\n*/,/*comment\n*/}')
        self.assertEqual(json.loads(on), {'abc': 'def'})
        on = js_to_json('{ 0: /* " \n */ ",]" , }')
        self.assertEqual(json.loads(on), {'0': ',]'})
        on = js_to_json('{ /*comment\n*/0/*comment\n*/: /* " \n */ ",]" , }')
        self.assertEqual(json.loads(on), {'0': ',]'})
        on = js_to_json('{ 0: // comment\n1 }')
        self.assertEqual(json.loads(on), {'0': 1})
        on = js_to_json(r'["<p>x<\/p>"]')
        self.assertEqual(json.loads(on), ['<p>x</p>'])
        on = js_to_json(r'["\xaa"]')
        self.assertEqual(json.loads(on), ['\u00aa'])
        # Backslash-newline inside a string is a JS line continuation.
        on = js_to_json("['a\\\nb']")
        self.assertEqual(json.loads(on), ['ab'])
        on = js_to_json("/*comment\n*/[/*comment\n*/'a\\\nb'/*comment\n*/]/*comment\n*/")
        self.assertEqual(json.loads(on), ['ab'])
        # Hex and octal number literals are converted to decimal.
        on = js_to_json('{0xff:0xff}')
        self.assertEqual(json.loads(on), {'255': 255})
        on = js_to_json('{/*comment\n*/0xff/*comment\n*/:/*comment\n*/0xff/*comment\n*/}')
        self.assertEqual(json.loads(on), {'255': 255})
        on = js_to_json('{077:077}')
        self.assertEqual(json.loads(on), {'63': 63})
        on = js_to_json('{/*comment\n*/077/*comment\n*/:/*comment\n*/077/*comment\n*/}')
        self.assertEqual(json.loads(on), {'63': 63})
        on = js_to_json('{42:42}')
        self.assertEqual(json.loads(on), {'42': 42})
        on = js_to_json('{/*comment\n*/42/*comment\n*/:/*comment\n*/42/*comment\n*/}')
        self.assertEqual(json.loads(on), {'42': 42})
        on = js_to_json('{42:4.2e1}')
        self.assertEqual(json.loads(on), {'42': 42.0})
        # Number-like content inside strings must stay untouched.
        on = js_to_json('{ "0x40": "0x40" }')
        self.assertEqual(json.loads(on), {'0x40': '0x40'})
        on = js_to_json('{ "040": "040" }')
        self.assertEqual(json.loads(on), {'040': '040'})
        on = js_to_json('[1,//{},\n2]')
        self.assertEqual(json.loads(on), [1, 2])
def test_js_to_json_malformed(self):
self.assertEqual(js_to_json('42a1'), '42"a1"')
self.assertEqual(js_to_json('42a-1'), '42"a"-1')
def test_extract_attributes(self):
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
self.assertEqual(extract_attributes("<e x='y'>"), {'x': 'y'})
self.assertEqual(extract_attributes('<e x=y>'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x="a \'b\' c">'), {'x': "a 'b' c"})
self.assertEqual(extract_attributes('<e x=\'a "b" c\'>'), {'x': 'a "b" c'})
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x="&">'), {'x': '&'})
self.assertEqual(extract_attributes('<e x=""">'), {'x': '"'})
self.assertEqual(extract_attributes('<e x="£">'), {'x': '£'}) # HTML 3.2
self.assertEqual(extract_attributes('<e x="λ">'), {'x': 'λ'}) # HTML 4.0
self.assertEqual(extract_attributes('<e x="&foo">'), {'x': '&foo'})
self.assertEqual(extract_attributes('<e x="\'">'), {'x': "'"})
self.assertEqual(extract_attributes('<e x=\'"\'>'), {'x': '"'})
self.assertEqual(extract_attributes('<e x >'), {'x': None})
self.assertEqual(extract_attributes('<e x=y a>'), {'x': 'y', 'a': None})
self.assertEqual(extract_attributes('<e x= y>'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x=1 y=2 x=3>'), {'y': '2', 'x': '3'})
self.assertEqual(extract_attributes('<e \nx=\ny\n>'), {'x': 'y'})
self.assertEqual(extract_attributes('<e \nx=\n"y"\n>'), {'x': 'y'})
self.assertEqual(extract_attributes("<e \nx=\n'y'\n>"), {'x': 'y'})
self.assertEqual(extract_attributes('<e \nx="\ny\n">'), {'x': '\ny\n'})
self.assertEqual(extract_attributes('<e CAPS=x>'), {'caps': 'x'}) # Names lowercased
self.assertEqual(extract_attributes('<e x=1 X=2>'), {'x': '2'})
self.assertEqual(extract_attributes('<e X=1 x=2>'), {'x': '2'})
self.assertEqual(extract_attributes('<e _:funny-name1=1>'), {'_:funny-name1': '1'})
self.assertEqual(extract_attributes('<e x="Fáilte 世界 \U0001f600">'), {'x': 'Fáilte 世界 \U0001f600'})
self.assertEqual(extract_attributes('<e x="décomposé">'), {'x': 'décompose\u0301'})
# "Narrow" Python builds don't support unicode code points outside BMP.
try:
compat_chr(0x10000)
supports_outside_bmp = True
except ValueError:
supports_outside_bmp = False
if supports_outside_bmp:
self.assertEqual(extract_attributes('<e x="Smile &
# Malformed HTML should not break attributes extraction on older Python
self.assertEqual(extract_attributes('<mal"formed/>'), {})
def test_clean_html(self):
self.assertEqual(clean_html('a:\nb'), 'a: b')
self.assertEqual(clean_html('a:\n "b"'), 'a: "b"')
self.assertEqual(clean_html('a<br>\xa0b'), 'a\nb')
def test_intlist_to_bytes(self):
self.assertEqual(
intlist_to_bytes([0, 1, 127, 128, 255]),
b'\x00\x01\x7f\x80\xff')
    def test_args_to_str(self):
        """args_to_str quotes arguments per platform (POSIX vs Windows)."""
        self.assertEqual(
            args_to_str(['foo', 'ba/r', '-baz', '2 be', '']),
            'foo ba/r -baz \'2 be\' \'\'' if compat_os_name != 'nt' else 'foo ba/r -baz "2 be" ""'
        )
def test_parse_filesize(self):
self.assertEqual(parse_filesize(None), None)
self.assertEqual(parse_filesize(''), None)
self.assertEqual(parse_filesize('91 B'), 91)
self.assertEqual(parse_filesize('foobar'), None)
self.assertEqual(parse_filesize('2 MiB'), 2097152)
self.assertEqual(parse_filesize('5 GB'), 5000000000)
self.assertEqual(parse_filesize('1.2Tb'), 1200000000000)
self.assertEqual(parse_filesize('1.2tb'), 1200000000000)
self.assertEqual(parse_filesize('1,24 KB'), 1240)
self.assertEqual(parse_filesize('1,24 kb'), 1240)
self.assertEqual(parse_filesize('8.5 megabytes'), 8500000)
def test_parse_count(self):
self.assertEqual(parse_count(None), None)
self.assertEqual(parse_count(''), None)
self.assertEqual(parse_count('0'), 0)
self.assertEqual(parse_count('1000'), 1000)
self.assertEqual(parse_count('1.000'), 1000)
self.assertEqual(parse_count('1.1k'), 1100)
self.assertEqual(parse_count('1.1kk'), 1100000)
self.assertEqual(parse_count('1.1kk '), 1100000)
self.assertEqual(parse_count('1.1kk views'), 1100000)
def test_parse_resolution(self):
self.assertEqual(parse_resolution(None), {})
self.assertEqual(parse_resolution(''), {})
self.assertEqual(parse_resolution('1920x1080'), {'width': 1920, 'height': 1080})
self.assertEqual(parse_resolution('1920×1080'), {'width': 1920, 'height': 1080})
self.assertEqual(parse_resolution('1920 x 1080'), {'width': 1920, 'height': 1080})
self.assertEqual(parse_resolution('720p'), {'height': 720})
self.assertEqual(parse_resolution('4k'), {'height': 2160})
self.assertEqual(parse_resolution('8K'), {'height': 4320})
def test_parse_bitrate(self):
self.assertEqual(parse_bitrate(None), None)
self.assertEqual(parse_bitrate(''), None)
self.assertEqual(parse_bitrate('300kbps'), 300)
self.assertEqual(parse_bitrate('1500kbps'), 1500)
self.assertEqual(parse_bitrate('300 kbps'), 300)
def test_version_tuple(self):
self.assertEqual(version_tuple('1'), (1,))
self.assertEqual(version_tuple('10.23.344'), (10, 23, 344))
self.assertEqual(version_tuple('10.1-6'), (10, 1, 6)) # avconv style
    def test_detect_exe_version(self):
        """detect_exe_version finds the version token in typical ffmpeg
        banner output, even with noise lines before it."""
        self.assertEqual(detect_exe_version('''ffmpeg version 1.2.1
built on May 27 2013 08:37:26 with gcc 4.7 (Debian 4.7.3-4)
configuration: --prefix=/usr --extra-'''), '1.2.1')
        self.assertEqual(detect_exe_version('''ffmpeg version N-63176-g1fb4685
built on May 15 2014 22:09:06 with gcc 4.8.2 (GCC)'''), 'N-63176-g1fb4685')
        self.assertEqual(detect_exe_version('''X server found. dri2 connection failed!
Trying to open render node...
Success at /dev/dri/renderD128.
ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
def test_age_restricted(self):
self.assertFalse(age_restricted(None, 10)) # unrestricted content
self.assertFalse(age_restricted(1, None)) # unrestricted policy
self.assertFalse(age_restricted(8, 10))
self.assertTrue(age_restricted(18, 14))
self.assertFalse(age_restricted(18, 18))
    def test_is_html(self):
        """is_html sniffs HTML from leading bytes, honoring UTF-8/16/32 BOMs."""
        self.assertFalse(is_html(b'\x49\x44\x43<html'))
        self.assertTrue(is_html(b'<!DOCTYPE foo>\xaaa'))
        self.assertTrue(is_html(  # UTF-8 with BOM
            b'\xef\xbb\xbf<!DOCTYPE foo>\xaaa'))
        self.assertTrue(is_html(  # UTF-16-LE
            b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00'
        ))
        self.assertTrue(is_html(  # UTF-16-BE
            b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4'
        ))
        self.assertTrue(is_html(  # UTF-32-BE
            b'\x00\x00\xFE\xFF\x00\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4'))
        self.assertTrue(is_html(  # UTF-32-LE
            b'\xFF\xFE\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4\x00\x00\x00'))
def test_render_table(self):
self.assertEqual(
render_table(
['a', 'bcd'],
[[123, 4], [9999, 51]]),
'a bcd\n'
'123 4\n'
'9999 51')
    def test_match_str(self):
        """match_str evaluates '&'-joined comparison filters against a dict:
        =/!=, </>/<=/>= with K/M suffixes, '?' for optional fields, '!' to
        negate presence, and bare names for truthiness/presence."""
        self.assertRaises(ValueError, match_str, 'xy>foobar', {})
        self.assertFalse(match_str('xy', {'x': 1200}))
        self.assertTrue(match_str('!xy', {'x': 1200}))
        self.assertTrue(match_str('x', {'x': 1200}))
        self.assertFalse(match_str('!x', {'x': 1200}))
        self.assertTrue(match_str('x', {'x': 0}))
        self.assertFalse(match_str('x>0', {'x': 0}))
        self.assertFalse(match_str('x>0', {}))
        self.assertTrue(match_str('x>?0', {}))  # '?' makes a missing field pass
        self.assertTrue(match_str('x>1K', {'x': 1200}))
        self.assertFalse(match_str('x>2K', {'x': 1200}))
        self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200}))
        self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200}))
        self.assertFalse(match_str('y=a212', {'y': 'foobar42'}))
        self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'}))
        self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'}))
        self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'}))
        self.assertFalse(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 90, 'description': 'foo'}))
        self.assertTrue(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 190, 'description': 'foo'}))
        self.assertFalse(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 190, 'dislike_count': 60, 'description': 'foo'}))
        self.assertFalse(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 190, 'dislike_count': 10}))
        # Bare boolean field: only an explicit True matches.
        self.assertTrue(match_str('is_live', {'is_live': True}))
        self.assertFalse(match_str('is_live', {'is_live': False}))
        self.assertFalse(match_str('is_live', {'is_live': None}))
        self.assertFalse(match_str('is_live', {}))
        self.assertFalse(match_str('!is_live', {'is_live': True}))
        self.assertTrue(match_str('!is_live', {'is_live': False}))
        self.assertTrue(match_str('!is_live', {'is_live': None}))
        self.assertTrue(match_str('!is_live', {}))
        # Bare string field: presence matters, even for the empty string.
        self.assertTrue(match_str('title', {'title': 'abc'}))
        self.assertTrue(match_str('title', {'title': ''}))
        self.assertFalse(match_str('!title', {'title': 'abc'}))
        self.assertFalse(match_str('!title', {'title': ''}))
def test_parse_dfxp_time_expr(self):
self.assertEqual(parse_dfxp_time_expr(None), None)
self.assertEqual(parse_dfxp_time_expr(''), None)
self.assertEqual(parse_dfxp_time_expr('0.1'), 0.1)
self.assertEqual(parse_dfxp_time_expr('0.1s'), 0.1)
self.assertEqual(parse_dfxp_time_expr('00:00:01'), 1.0)
self.assertEqual(parse_dfxp_time_expr('00:00:01.100'), 1.1)
self.assertEqual(parse_dfxp_time_expr('00:00:01:100'), 1.1)
def test_dfxp2srt(self):
    # NOTE(review): the fixture strings below look as if blank lines were
    # stripped by an automated export; upstream SRT fixtures separate cues
    # with blank lines -- confirm against the original test file.
    dfxp_data = '''<?xml version="1.0" encoding="UTF-8"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The following line contains Chinese characters and special symbols</p>
<p begin="1" end="2">第二行<br/>♪♪</p>
<p begin="2" dur="1"><span>Third<br/>Line</span></p>
<p begin="3" end="-1">Lines with invalid timestamps are ignored</p>
<p begin="-1" end="-1">Ignore, two</p>
<p begin="3" dur="-1">Ignored, three</p>
</div>
</body>
</tt>'''.encode('utf-8')
    srt_data = '''1
00:00:00,000 --> 00:00:01,000
The following line contains Chinese characters and special symbols
2
00:00:01,000 --> 00:00:02,000
第二行
♪♪
3
00:00:02,000 --> 00:00:03,000
Third
Line
'''
    # Cues with negative begin/end/dur must be dropped entirely.
    self.assertEqual(dfxp2srt(dfxp_data), srt_data)
    # A TTML document without the default namespace is still parsed.
    dfxp_data_no_default_namespace = '''<?xml version="1.0" encoding="UTF-8"?>
<tt xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The first line</p>
</div>
</body>
</tt>'''.encode('utf-8')
    srt_data = '''1
00:00:00,000 --> 00:00:01,000
The first line
'''
    self.assertEqual(dfxp2srt(dfxp_data_no_default_namespace), srt_data)
    # TTML styling (color/weight/style/decoration) is translated into
    # SRT <font>/<b>/<i>/<u> tags, including nested spans.
    dfxp_data_with_style = '''<?xml version="1.0" encoding="utf-8"?>
<tt xmlns="http://www.w3.org/2006/10/ttaf1" xmlns:ttp="http://www.w3.org/2006/10/ttaf1#parameter" ttp:timeBase="media" xmlns:tts="http://www.w3.org/2006/10/ttaf1#style" xml:lang="en" xmlns:ttm="http://www.w3.org/2006/10/ttaf1#metadata">
<head>
<styling>
<style id="s2" style="s0" tts:color="cyan" tts:fontWeight="bold" />
<style id="s1" style="s0" tts:color="yellow" tts:fontStyle="italic" />
<style id="s3" style="s0" tts:color="lime" tts:textDecoration="underline" />
<style id="s0" tts:backgroundColor="black" tts:fontStyle="normal" tts:fontSize="16" tts:fontFamily="sansSerif" tts:color="white" />
</styling>
</head>
<body tts:textAlign="center" style="s0">
<div>
<p begin="00:00:02.08" id="p0" end="00:00:05.84">default style<span tts:color="red">custom style</span></p>
<p style="s2" begin="00:00:02.08" id="p0" end="00:00:05.84"><span tts:color="lime">part 1<br /></span><span tts:color="cyan">part 2</span></p>
<p style="s3" begin="00:00:05.84" id="p1" end="00:00:09.56">line 3<br />part 3</p>
<p style="s1" tts:textDecoration="underline" begin="00:00:09.56" id="p2" end="00:00:12.36"><span style="s2" tts:color="lime">inner<br /> </span>style</p>
</div>
</body>
</tt>'''.encode('utf-8')
    srt_data = '''1
00:00:02,080 --> 00:00:05,839
<font color="white" face="sansSerif" size="16">default style<font color="red">custom style</font></font>
2
00:00:02,080 --> 00:00:05,839
<b><font color="cyan" face="sansSerif" size="16"><font color="lime">part 1
</font>part 2</font></b>
3
00:00:05,839 --> 00:00:09,560
<u><font color="lime">line 3
part 3</font></u>
4
00:00:09,560 --> 00:00:12,359
<i><u><font color="yellow"><font color="lime">inner
</font>style</font></u></i>
'''
    self.assertEqual(dfxp2srt(dfxp_data_with_style), srt_data)
    # The document encoding is taken from the XML declaration (UTF-16 here).
    dfxp_data_non_utf8 = '''<?xml version="1.0" encoding="UTF-16"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">Line 1</p>
<p begin="1" end="2">第二行</p>
</div>
</body>
</tt>'''.encode('utf-16')
    srt_data = '''1
00:00:00,000 --> 00:00:01,000
Line 1
2
00:00:01,000 --> 00:00:02,000
第二行
'''
    self.assertEqual(dfxp2srt(dfxp_data_non_utf8), srt_data)
def test_cli_option(self):
    # cli_option emits "--flag value" only when the key is present and
    # its value is not None.
    for params, expected in [
        ({'proxy': '127.0.0.1:3128'}, ['--proxy', '127.0.0.1:3128']),
        ({'proxy': None}, []),
        ({}, []),
    ]:
        self.assertEqual(cli_option(params, '--proxy', 'proxy'), expected)
    # Non-string values are converted to their string form.
    self.assertEqual(cli_option({'retries': 10}, '--retries', 'retries'), ['--retries', '10'])
def test_cli_valueless_option(self):
    # The bare flag is emitted exactly when params[param] equals the
    # expected value (which defaults to True).
    cases = [
        ({'downloader': 'external'}, '--external-downloader', 'downloader', 'external', ['--external-downloader']),
        ({'downloader': 'internal'}, '--external-downloader', 'downloader', 'external', []),
        ({'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate', True, ['--no-check-certificate']),
        ({'nocheckcertificate': False}, '--no-check-certificate', 'nocheckcertificate', True, []),
        ({'checkcertificate': True}, '--no-check-certificate', 'checkcertificate', False, []),
        ({'checkcertificate': False}, '--no-check-certificate', 'checkcertificate', False, ['--no-check-certificate']),
    ]
    for params, command_option, param, expected_value, expected in cases:
        self.assertEqual(
            cli_valueless_option(params, command_option, param, expected_value),
            expected)
def test_cli_bool_option(self):
    # cli_bool_option renders a boolean param as "--flag true/false",
    # optionally joined by a separator, with custom true/false spellings.
    self.assertEqual(
        cli_bool_option(
            {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'),
        ['--no-check-certificate', 'true'])
    self.assertEqual(
        cli_bool_option(
            {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate', separator='='),
        ['--no-check-certificate=true'])
    # With explicit true/false values, a "no-" style param maps naturally
    # onto the positive flag; a missing param emits nothing.
    for params, args, expected in [
        ({'nocheckcertificate': True},
         ('--check-certificate', 'nocheckcertificate', 'false', 'true'),
         ['--check-certificate', 'false']),
        ({'nocheckcertificate': True},
         ('--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
         ['--check-certificate=false']),
        ({'nocheckcertificate': False},
         ('--check-certificate', 'nocheckcertificate', 'false', 'true'),
         ['--check-certificate', 'true']),
        ({'nocheckcertificate': False},
         ('--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
         ['--check-certificate=true']),
        ({},
         ('--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
         []),
    ]:
        self.assertEqual(cli_bool_option(params, *args), expected)
def test_ohdave_rsa_encrypt(self):
    # Textbook RSA (no padding): the ciphertext is the hex string of
    # m ** e mod N computed over the raw message bytes.
    N = 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
    e = 65537
    self.assertEqual(
        ohdave_rsa_encrypt(b'aa111222', e, N),
        '726664bd9a23fd0c70f9f1b84aab5e3905ce1e45a584e9cbcf9bcc7510338fc1986d6c599ff990d923aa43c51c0d9013cd572e13bc58f4ae48f2ed8c0b0ba881')
def test_pkcs1pad(self):
    # PKCS#1 v1.5 type-2 padding layout: [0, 2] marker, random non-zero
    # filler, then a 0 separator followed by the payload.
    payload = [1, 2, 3]
    padded = pkcs1pad(payload, 32)
    self.assertEqual(padded[:2], [0, 2])
    self.assertEqual(padded[28:], [0, 1, 2, 3])
    # A block too small for payload plus mandatory overhead must raise.
    self.assertRaises(ValueError, pkcs1pad, payload, 8)
def test_encode_base_n(self):
    # Default digit table: 0-9 then lowercase letters.
    self.assertEqual(encode_base_n(0, 30), '0')
    self.assertEqual(encode_base_n(80, 30), '2k')
    # A caller-supplied alphabet replaces the default digit table.
    reversed_table = '9876543210ZYXWVUTSRQPONMLKJIHGFEDCBA'
    self.assertEqual(encode_base_n(0, 30, reversed_table), '9')
    self.assertEqual(encode_base_n(80, 30, reversed_table), '7P')
    # The base must not exceed the size of the digit table in use.
    self.assertRaises(ValueError, encode_base_n, 0, 70)
    self.assertRaises(ValueError, encode_base_n, 0, 60, reversed_table)
def test_caesar(self):
    alphabet = 'abcdef'
    # Positive shifts rotate forwards within the alphabet, negative
    # shifts rotate backwards; the two directions round-trip.
    self.assertEqual(caesar('ace', alphabet, 2), 'cea')
    self.assertEqual(caesar('cea', alphabet, -2), 'ace')
    self.assertEqual(caesar('ace', alphabet, -2), 'eac')
    self.assertEqual(caesar('eac', alphabet, 2), 'ace')
    # A zero shift is the identity.
    self.assertEqual(caesar('ace', alphabet, 0), 'ace')
    # Characters outside the alphabet pass through untouched.
    self.assertEqual(caesar('xyz', alphabet, 2), 'xyz')
    # Shifting works on alphabet positions, not on code points.
    self.assertEqual(caesar('abc', 'acegik', 2), 'ebg')
    self.assertEqual(caesar('ebg', 'acegik', -2), 'abc')
def test_rot47(self):
    # ROT47 maps every printable ASCII character 47 positions along.
    for plain, scrambled in [('yt-dlp', r'JE\5=A'), ('YT-DLP', r'*%\s{!')]:
        self.assertEqual(rot47(plain), scrambled)
def test_urshift(self):
    # urshift emulates JavaScript's >>> operator: an unsigned 32-bit
    # right shift, so negative inputs wrap to large positive values.
    self.assertEqual(urshift(3, 1), 1)
    self.assertEqual(urshift(-3, 1), 2147483646)
def test_get_element_by_class(self):
    markup = '''
<span class="foo bar">nice</span>
'''
    # A match on any single class token returns the element content.
    self.assertEqual(get_element_by_class('foo', markup), 'nice')
    # None when no element carries the class.
    self.assertEqual(get_element_by_class('no-such-class', markup), None)
def test_get_element_by_attribute(self):
    markup = '''
<span class="foo bar">nice</span>
'''
    # Unlike get_element_by_class, the attribute value must match in
    # full -- a single class token is not enough.
    self.assertEqual(get_element_by_attribute('class', 'foo bar', markup), 'nice')
    self.assertEqual(get_element_by_attribute('class', 'foo', markup), None)
    self.assertEqual(get_element_by_attribute('class', 'no-such-foo', markup), None)
    # Valueless (boolean) attributes elsewhere on the tag are tolerated.
    markup = '''
<div itemprop="author" itemscope>foo</div>
'''
    self.assertEqual(get_element_by_attribute('itemprop', 'author', markup), 'foo')
def test_get_elements_by_class(self):
    markup = '''
<span class="foo bar">nice</span><span class="foo bar">also nice</span>
'''
    # Every element carrying the class is returned, in document order.
    self.assertEqual(get_elements_by_class('foo', markup), ['nice', 'also nice'])
    # An empty list (not None) when nothing matches.
    self.assertEqual(get_elements_by_class('no-such-class', markup), [])
def test_get_elements_by_attribute(self):
    markup = '''
<span class="foo bar">nice</span><span class="foo bar">also nice</span>
'''
    # Full attribute-value matches collect every matching element.
    self.assertEqual(get_elements_by_attribute('class', 'foo bar', markup), ['nice', 'also nice'])
    # Partial or unknown values match nothing.
    self.assertEqual(get_elements_by_attribute('class', 'foo', markup), [])
    self.assertEqual(get_elements_by_attribute('class', 'no-such-foo', markup), [])
def test_iri_to_uri(self):
    """iri_to_uri percent-encodes non-ASCII (and reserved) characters in
    the path/query/fragment and IDNA-encodes non-ASCII hostnames, while
    leaving already-valid URIs untouched."""
    self.assertEqual(
        iri_to_uri('https://www.google.com/search?q=foo&ie=utf-8&oe=utf-8&client=firefox-b'),
        'https://www.google.com/search?q=foo&ie=utf-8&oe=utf-8&client=firefox-b')  # Same
    self.assertEqual(
        iri_to_uri('https://www.google.com/search?q=Käsesoßenrührlöffel'),  # German for cheese sauce stirring spoon
        'https://www.google.com/search?q=K%C3%A4seso%C3%9Fenr%C3%BChrl%C3%B6ffel')
    # NOTE(review): this assertion had been mangled by an automated
    # comment stripper (everything from the literal '#' inside the URL
    # string onwards was dropped, fusing two statements); restored from
    # the upstream test suite -- please re-verify against upstream.
    self.assertEqual(
        iri_to_uri('https://www.google.com/search?q=lt<+gt>+eq%3D+amp%26+percent%25+hash%23+colon%3A+tilde~#trash=?&garbage=#'),
        'https://www.google.com/search?q=lt%3C+gt%3E+eq%3D+amp%26+percent%25+hash%23+colon%3A+tilde~%23trash=?&garbage=%23')
    # Non-ASCII hostnames are converted to IDNA punycode.
    self.assertEqual(
        iri_to_uri('http://правозащита38.рф/category/news/'),
        'http://xn--38-6kcaak9aj5chl4a3g.xn--p1ai/category/news/')
    self.assertEqual(
        iri_to_uri('http://www.правозащита38.рф/category/news/'),
        'http://www.xn--38-6kcaak9aj5chl4a3g.xn--p1ai/category/news/')
    # Punycode host plus percent-encoded emoji path.
    self.assertEqual(
        iri_to_uri('https://i❤.ws/emojidomain/👍👏🤝💪'),
        'https://xn--i-7iq.ws/emojidomain/%F0%9F%91%8D%F0%9F%91%8F%F0%9F%A4%9D%F0%9F%92%AA')
    self.assertEqual(
        iri_to_uri('http://日本語.jp/'),
        'http://xn--wgv71a119e.jp/')
    self.assertEqual(
        iri_to_uri('http://导航.中国/'),
        'http://xn--fet810g.xn--fiqs8s/')
def test_clean_podcast_url(self):
    # Tracking-redirector prefixes (podtrac, chartable, ...) are stripped
    # down to the final media URL.
    self.assertEqual(clean_podcast_url('https://www.podtrac.com/pts/redirect.mp3/chtbl.com/track/5899E/traffic.megaphone.fm/HSW7835899191.mp3'), 'https://traffic.megaphone.fm/HSW7835899191.mp3')
    self.assertEqual(clean_podcast_url('https://play.podtrac.com/npr-344098539/edge1.pod.npr.org/anon.npr-podcasts/podcast/npr/waitwait/2020/10/20201003_waitwait_wwdtmpodcast201003-015621a5-f035-4eca-a9a1-7c118d90bc3c.mp3'), 'https://edge1.pod.npr.org/anon.npr-podcasts/podcast/npr/waitwait/2020/10/20201003_waitwait_wwdtmpodcast201003-015621a5-f035-4eca-a9a1-7c118d90bc3c.mp3')
def test_LazyList(self):
    # LazyList must be indistinguishable from the underlying list for
    # iteration, exhaustion, indexing and arbitrary slicing.
    it = list(range(10))
    self.assertEqual(list(LazyList(it)), it)
    self.assertEqual(LazyList(it).exhaust(), it)
    self.assertEqual(LazyList(it)[5], it[5])
    # Slices, including negative steps, mirror list slicing semantics.
    self.assertEqual(LazyList(it)[5:], it[5:])
    self.assertEqual(LazyList(it)[:5], it[:5])
    self.assertEqual(LazyList(it)[::2], it[::2])
    self.assertEqual(LazyList(it)[1::2], it[1::2])
    self.assertEqual(LazyList(it)[5::-1], it[5::-1])
    self.assertEqual(LazyList(it)[6:2:-2], it[6:2:-2])
    self.assertEqual(LazyList(it)[::-1], it[::-1])
    # Truthiness, length, repr and str delegate to the materialized list.
    self.assertTrue(LazyList(it))
    self.assertFalse(LazyList(range(0)))
    self.assertEqual(len(LazyList(it)), len(it))
    self.assertEqual(repr(LazyList(it)), repr(it))
    self.assertEqual(str(LazyList(it)), str(it))
    # reverse() yields a reversed view that still supports slicing.
    self.assertEqual(list(LazyList(it).reverse()), it[::-1])
    self.assertEqual(list(LazyList(it).reverse()[1:3:7]), it[::-1][1:3:7])
    self.assertEqual(list(LazyList(it).reverse()[::-1]), it)
def test_LazyList_laziness(self):
    def test(ll, idx, val, cache):
        # Check both the indexed value and exactly how much of the
        # iterable has been consumed into the private cache so far.
        self.assertEqual(ll[idx], val)
        self.assertEqual(getattr(ll, '_LazyList__cache'), list(cache))
    ll = LazyList(range(10))
    test(ll, 0, 0, range(1))  # only the first element is pulled
    test(ll, 5, 5, range(6))  # indexing consumes up to the index
    test(ll, -3, 7, range(10))  # a negative index forces full exhaustion
    ll = LazyList(range(10)).reverse()
    test(ll, -1, 0, range(1))  # reversed: -1 maps to the first item
    test(ll, 3, 6, range(10))
    # Works with an infinite iterator as long as indexing stays finite.
    ll = LazyList(itertools.count())
    test(ll, 10, 10, range(11))
    ll.reverse()
    test(ll, -15, 14, range(15))
# Allow invoking this test module directly (python test_utils.py).
if __name__ == '__main__':
    unittest.main()
| true | true |
f73e7b95fd87752904a205042103207418004d40 | 5,007 | py | Python | python/qipy/worktree.py | aldebaran/qibuild | efea6fa3744664348717fe5e8df708a3cf392072 | [
"BSD-3-Clause"
] | 51 | 2015-01-05T14:35:13.000Z | 2021-07-27T06:46:59.000Z | python/qipy/worktree.py | aldebaran/qibuild | efea6fa3744664348717fe5e8df708a3cf392072 | [
"BSD-3-Clause"
] | 104 | 2015-04-09T10:48:42.000Z | 2020-09-16T16:33:29.000Z | python/qipy/worktree.py | aldebaran/qibuild | efea6fa3744664348717fe5e8df708a3cf392072 | [
"BSD-3-Clause"
] | 46 | 2015-01-05T14:35:16.000Z | 2022-02-13T20:39:36.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Worktree """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import virtualenv
from qisys import ui
import qisys.worktree
import qisys.qixml
import qipy.project
class PythonWorkTree(qisys.worktree.WorkTreeObserver):
    """ Container for the Python projects of a qisys worktree.

    Registers itself as a worktree observer so that
    ``self.python_projects`` stays in sync with the worktree, and knows
    where the matching virtualenv (selected by ``config``) lives.
    """

    def __init__(self, worktree, config="system"):
        """ Scan the worktree for Python projects and register as observer. """
        self.worktree = worktree
        self.python_projects = list()
        self._load_python_projects()
        self.config = config
        worktree.register(self)

    def reload(self):
        """ Observer callback: re-scan the worktree for Python projects. """
        self._load_python_projects()

    @property
    def root(self):
        """ Root path of the underlying worktree. """
        return self.worktree.root

    def _load_python_projects(self):
        """ Rebuild self.python_projects from every project that has a
        qiproject.xml with a <qipython> element, rejecting duplicates. """
        seen_names = dict()
        self.python_projects = list()
        for project in self.worktree.projects:
            qiproject_xml = os.path.join(project.path, "qiproject.xml")
            if not os.path.exists(qiproject_xml):
                continue
            new_project = new_python_project(self, project)
            if not new_project:
                continue
            if new_project.name in seen_names:
                mess = """ \
Found two projects with the same name. (%s)
%s
%s
""" % (new_project.name, seen_names[new_project.name], new_project.src)
                raise Exception(mess)
            self.python_projects.append(new_project)
            seen_names[new_project.name] = new_project.src

    def get_python_project(self, name, raises=False):
        """ Get a Python project given its name.

        :param raises: when True, raise NoSuchProject (with a
                       did-you-mean hint) instead of returning None.
        """
        for project in self.python_projects:
            if project.name == name:
                return project
        if raises:
            mess = ui.did_you_mean("No such python project: %s" % name,
                                   name, [x.name for x in self.python_projects])
            raise qisys.worktree.NoSuchProject(name, mess)
        else:
            return None

    def bin_path(self, name):
        """ Path to a binary inside the virtualenv's scripts directory. """
        binaries_path = virtualenv.path_locations(self.venv_path)[-1]
        return os.path.join(binaries_path, name)

    @property
    def venv_path(self):
        """ Path to the virtualenv matching self.config. """
        res = os.path.join(self.worktree.dot_qi, "venvs",
                           self.config)
        return res

    @property
    def pip(self):
        """ Path to the pip binary of the virtualenv. """
        return self.bin_path("pip")

    @property
    def python(self):
        """ Path to the python executable in the virtualenv. """
        return self.bin_path("python")

    def activate_this(self):
        """ Activate this virtualenv inside the current process. """
        activate_this_dot_py = self.bin_path("activate_this.py")
        # Bug fix: execfile() only exists on Python 2, while this module
        # declares Python 3 compatibility (__future__ imports above).
        # exec(compile(...)) behaves identically on both major versions.
        with open(activate_this_dot_py) as fp:
            code = compile(fp.read(), activate_this_dot_py, "exec")
        exec(code, {"__file__": activate_this_dot_py})
def new_python_project(worktree, project):
    """ Build a qipy.project.PythonProject from a project's qiproject.xml.

    Returns None when the qiproject.xml contains no <qipython> element,
    i.e. the project is not a Python project.
    """
    qiproject_xml = project.qiproject_xml
    tree = qisys.qixml.read(qiproject_xml)
    qipython_elem = tree.find("qipython")
    if qipython_elem is None:
        # Not a Python project
        return None
    name = qisys.qixml.parse_required_attr(qipython_elem, "name",
                                           xml_path=qiproject_xml)
    python_project = qipy.project.PythonProject(worktree, project.src, name)
    # <script src="..."/> entries: standalone executable scripts
    script_elems = qipython_elem.findall("script")
    for script_elem in script_elems:
        src = qisys.qixml.parse_required_attr(script_elem, "src",
                                              xml_path=qiproject_xml)
        script = qipy.project.Script(src)
        python_project.scripts.append(script)
    # <module name="..." src="..."/> entries: single-file Python modules
    module_elems = qipython_elem.findall("module")
    for module_elem in module_elems:
        src = module_elem.get("src", "")
        name = qisys.qixml.parse_required_attr(module_elem, "name",
                                               xml_path=qiproject_xml)
        module = qipy.project.Module(name, src)
        module.qimodule = qisys.qixml.parse_bool_attr(module_elem, "qimodule")
        python_project.modules.append(module)
    # <package name="..." src="..."/> entries: Python package directories
    package_elems = qipython_elem.findall("package")
    for package_elem in package_elems:
        name = qisys.qixml.parse_required_attr(package_elem, "name",
                                               xml_path=qiproject_xml)
        src = package_elem.get("src", "")
        package = qipy.project.Package(name, src)
        package.qimodule = qisys.qixml.parse_bool_attr(package_elem, "qimodule")
        python_project.packages.append(package)
    # Optional <setup with_distutils="..."/> flag
    setup_elem = qipython_elem.find("setup")
    if setup_elem is not None:
        python_project.setup_with_distutils = \
            qisys.qixml.parse_bool_attr(setup_elem, "with_distutils")
    return python_project
| 36.282609 | 84 | 0.628121 |
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import virtualenv
from qisys import ui
import qisys.worktree
import qisys.qixml
import qipy.project
class PythonWorkTree(qisys.worktree.WorkTreeObserver):
def __init__(self, worktree, config="system"):
self.worktree = worktree
self.python_projects = list()
self._load_python_projects()
self.config = config
worktree.register(self)
def reload(self):
self._load_python_projects()
@property
def root(self):
return self.worktree.root
def _load_python_projects(self):
seen_names = dict()
self.python_projects = list()
for project in self.worktree.projects:
qiproject_xml = os.path.join(project.path, "qiproject.xml")
if not os.path.exists(qiproject_xml):
continue
new_project = new_python_project(self, project)
if not new_project:
continue
if new_project.name in seen_names:
mess = """ \
Found two projects with the same name. (%s)
%s
%s
""" % (new_project.name, seen_names[new_project.name], new_project.src)
raise Exception(mess)
self.python_projects.append(new_project)
seen_names[new_project.name] = new_project.src
def get_python_project(self, name, raises=False):
for project in self.python_projects:
if project.name == name:
return project
if raises:
mess = ui.did_you_mean("No such python project: %s" % name,
name, [x.name for x in self.python_projects])
raise qisys.worktree.NoSuchProject(name, mess)
else:
return None
def bin_path(self, name):
binaries_path = virtualenv.path_locations(self.venv_path)[-1]
return os.path.join(binaries_path, name)
@property
def venv_path(self):
res = os.path.join(self.worktree.dot_qi, "venvs",
self.config)
return res
@property
def pip(self):
return self.bin_path("pip")
@property
def python(self):
return self.bin_path("python")
def activate_this(self):
activate_this_dot_py = self.bin_path("activate_this.py")
execfile(activate_this_dot_py, {"__file__": activate_this_dot_py})
def new_python_project(worktree, project):
qiproject_xml = project.qiproject_xml
tree = qisys.qixml.read(qiproject_xml)
qipython_elem = tree.find("qipython")
if qipython_elem is None:
return None
name = qisys.qixml.parse_required_attr(qipython_elem, "name",
xml_path=qiproject_xml)
python_project = qipy.project.PythonProject(worktree, project.src, name)
script_elems = qipython_elem.findall("script")
for script_elem in script_elems:
src = qisys.qixml.parse_required_attr(script_elem, "src",
xml_path=qiproject_xml)
script = qipy.project.Script(src)
python_project.scripts.append(script)
module_elems = qipython_elem.findall("module")
for module_elem in module_elems:
src = module_elem.get("src", "")
name = qisys.qixml.parse_required_attr(module_elem, "name",
xml_path=qiproject_xml)
module = qipy.project.Module(name, src)
module.qimodule = qisys.qixml.parse_bool_attr(module_elem, "qimodule")
python_project.modules.append(module)
package_elems = qipython_elem.findall("package")
for package_elem in package_elems:
name = qisys.qixml.parse_required_attr(package_elem, "name",
xml_path=qiproject_xml)
src = package_elem.get("src", "")
package = qipy.project.Package(name, src)
package.qimodule = qisys.qixml.parse_bool_attr(package_elem, "qimodule")
python_project.packages.append(package)
setup_elem = qipython_elem.find("setup")
if setup_elem is not None:
python_project.setup_with_distutils = \
qisys.qixml.parse_bool_attr(setup_elem, "with_distutils")
return python_project
| true | true |
f73e7bbe6022e667fd8dc11750c07030d43ac5d5 | 6,067 | py | Python | port.py | yikir/mmdetection | dfceb61b0252f81b010f550f2acbe46c7dad6ef6 | [
"Apache-2.0"
] | null | null | null | port.py | yikir/mmdetection | dfceb61b0252f81b010f550f2acbe46c7dad6ef6 | [
"Apache-2.0"
] | null | null | null | port.py | yikir/mmdetection | dfceb61b0252f81b010f550f2acbe46c7dad6ef6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# n(net) o(oil) h(hang) r(rust) 检测模块
import os
import sys
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_dir)
from mmdet.models import build_detector
import mmcv
import torch
import cv2
import time
import json
from mmcv.runner import load_checkpoint
import PIL.Image as Image
import numpy as np
from torchvision.transforms import transforms
import pycocotools.mask as maskUtils
current_dir = os.path.dirname(os.path.abspath(__file__))
config_file = os.path.join(current_dir, 'configs','config_cascade_rcnn.py')
weight_file = '/home/kilox/weights/nohr_best.pth'
# weight_file = '/Weights/verified/oil_detection_v1/oil_best.pth'
class Object(object):
    """Detection result record, serializable to JSON."""

    def __init__(self):
        # Defaults for a not-yet-classified detection.
        self.class_name = "Unknown"
        self.trust = 0.0
        self.rank = 0

    def to_json(self):
        """Return the instance attributes serialized as a JSON string."""
        return json.dumps(vars(self))
class Port:
    """Cascade R-CNN based detector for power-line defects.

    Detects the four classes in ``self.class_names`` (oil stain, bird
    nest, rust, hanging debris -- the tuple itself stays in Chinese
    because it is runtime data used for drawing labels).
    """
    def __init__(self):
        self.cfg = mmcv.Config.fromfile(config_file)
        # Build the detector; test_cfg carries the RPN/R-CNN NMS settings.
        self.detector = build_detector(self.cfg.model, train_cfg=None, test_cfg=self.cfg.test_cfg)
        # Load checkpoint weights on CPU first, then move the model to GPU.
        load_checkpoint(self.detector, weight_file, map_location='cpu')
        self.detector = self.detector.to('cuda')
        self.detector.eval()
        self.class_names = ('油污','鸟巢','锈蚀','飘挂物')
    def process(self, image,save=None):
        """
        Run detection on one image and write an annotated JPEG to the CWD.

        :param image: PIL.Image input image
        :param save: unused -- presumably a leftover flag; TODO confirm
        :return: (False, None, output_file_name); the first two slots look
                 like placeholders for (error_flag, detections) -- verify
                 against callers.
        """
        np_image = np.asarray(image)
        img, img_meta = self.prepare_single(np_image)
        # forward
        with torch.no_grad():
            # rescale=True maps the returned boxes/masks back to the
            # original image resolution
            result = self.detector.simple_test(img, [img_meta], proposals=None, rescale=True)
        # Draw the detections on the original image
        img = self.draw_image(np_image, img_meta, result)
        real_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))
        # Output name is just a timestamp, so the file lands in the CWD.
        output_file_name = os.path.join(real_time + '.jpg')
        cv2.imwrite(output_file_name, img)
        return False,None,output_file_name
    # Pack a raw numpy image into the (tensor, meta) pair expected by
    # mmdet's test pipeline.
    def prepare_single(self,img):
        img_info = {'height': img.shape[0], 'width': img.shape[1]}
        img_norm_cfg = self.cfg.img_norm_cfg
        size_divisor = self.cfg.data.test.size_divisor
        # Rescale keeping aspect ratio so the image fits within 4014x2400.
        img, scale_factor = mmcv.imrescale(img, (4014,2400), return_scale=True)
        img_shape = img.shape
        img = mmcv.imnormalize(img, img_norm_cfg.mean, img_norm_cfg.std, img_norm_cfg.to_rgb)
        # Pad so both sides are multiples of size_divisor.
        img = mmcv.impad_to_multiple(img, size_divisor)
        pad_shape = img.shape
        _img = transforms.ToTensor()(img).float()
        _img = _img.unsqueeze(0)  # add the batch dimension
        _img_meta = dict(
            ori_shape=(img_info['height'], img_info['width'], 3),
            img_shape=img_shape,
            pad_shape=pad_shape,
            scale_factor=scale_factor,
            flip=False)
        _img = _img.to('cuda')
        return _img, _img_meta,
    def draw_image(self,img, meta, result, score_thr=0.9):
        """Draw bboxes (score > score_thr) with class labels on a copy of img."""
        # NOTE(review): tensor2imgs is defined here but never called in
        # this method -- candidate for removal.
        def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
            num_imgs = tensor.size(0)
            mean = np.array(mean, dtype=np.float32)
            std = np.array(std, dtype=np.float32)
            imgs = []
            for img_id in range(num_imgs):
                img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
                img = mmcv.imdenormalize(
                    img, mean, std, to_bgr=to_rgb).astype(np.uint8)
                imgs.append(np.ascontiguousarray(img))
            return imgs
        if isinstance(result, tuple):
            bbox_result, segm_result = result
        else:
            bbox_result, segm_result = result, None
        h, w, _ = meta['ori_shape']
        img_show = img[:h, :w, :].copy()
        bboxes = np.vstack(bbox_result)
        # Mask drawing (disabled)
        # # draw segmentation masks
        # if segm_result is not None:
        #     segms = mmcv.concat_list(segm_result)
        #     inds = np.where(bboxes[:, -1] > score_thr)[0]
        #     for i in inds:
        #         color_mask = np.random.randint(
        #             0, 256, (1, 3), dtype=np.uint8)
        #         mask = maskUtils.decode(segms[i]).astype(np.bool)
        #         # todo fix dimension not equal
        #         img_check_shape = tuple(img_show.shape[0:2])
        #         if mask.shape != img_check_shape:
        #             width_diff = mask.shape[1] - img_check_shape[1]
        #             if mask.shape[1] < img_check_shape[1]:
        #                 mask = np.pad(mask, (0, width_diff), mode='constant', constant_values=False)
        #                 np.insert(mask, False, )
        #             else:
        #                 mask = mask[:, :-width_diff]
        #         img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5
        # Draw bounding boxes
        labels = [
            np.full(bbox.shape[0], i, dtype=np.int32)
            for i, bbox in enumerate(bbox_result)
        ]
        labels = np.concatenate(labels)
        assert bboxes.shape[1] == 5
        scores = bboxes[:, -1]
        inds = scores > score_thr
        bboxes = bboxes[inds, :]
        labels = labels[inds]
        for bbox, label in zip(bboxes, labels):
            bbox_int = bbox.astype(np.int32)
            left_top = (bbox_int[0], bbox_int[1])
            right_bottom = (bbox_int[2], bbox_int[3])
            cv2.rectangle(
                img_show, left_top, right_bottom, (0, 255, 0), thickness=2)
            label_text = self.class_names[
                label] if self.class_names is not None else 'cls {}'.format(label)
            if len(bbox) > 4:
                label_text += '|{:.02f}'.format(bbox[-1])
            cv2.putText(img_show, label_text, (bbox_int[0], bbox_int[1] - 2),
                        cv2.FONT_HERSHEY_COMPLEX, 2, (0, 255, 0))
        return img_show
def test():
    """Placeholder for a local smoke test; intentionally empty."""
    pass
# Manual smoke test: run detection on a hard-coded local image path.
if __name__ == '__main__':
    im = Image.open('/home/kilox/3.jpg')
    port = Port()
    print(port.process(im,True))
| 36.993902 | 102 | 0.575573 |
import os
import sys
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_dir)
from mmdet.models import build_detector
import mmcv
import torch
import cv2
import time
import json
from mmcv.runner import load_checkpoint
import PIL.Image as Image
import numpy as np
from torchvision.transforms import transforms
import pycocotools.mask as maskUtils
current_dir = os.path.dirname(os.path.abspath(__file__))
config_file = os.path.join(current_dir, 'configs','config_cascade_rcnn.py')
weight_file = '/home/kilox/weights/nohr_best.pth'
class Object(object):
def __init__(self):
self.class_name = "Unknown"
self.trust = 0.0
self.rank = 0
def to_json(self):
return json.dumps(self.__dict__)
class Port:
def __init__(self):
self.cfg = mmcv.Config.fromfile(config_file)
self.detector = build_detector(self.cfg.model, train_cfg=None, test_cfg=self.cfg.test_cfg)
load_checkpoint(self.detector, weight_file, map_location='cpu')
self.detector = self.detector.to('cuda')
self.detector.eval()
self.class_names = ('油污','鸟巢','锈蚀','飘挂物')
def process(self, image,save=None):
np_image = np.asarray(image)
img, img_meta = self.prepare_single(np_image)
with torch.no_grad():
result = self.detector.simple_test(img, [img_meta], proposals=None, rescale=True)
img = self.draw_image(np_image, img_meta, result)
real_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))
output_file_name = os.path.join(real_time + '.jpg')
cv2.imwrite(output_file_name, img)
return False,None,output_file_name
def prepare_single(self,img):
img_info = {'height': img.shape[0], 'width': img.shape[1]}
img_norm_cfg = self.cfg.img_norm_cfg
size_divisor = self.cfg.data.test.size_divisor
img, scale_factor = mmcv.imrescale(img, (4014,2400), return_scale=True)
img_shape = img.shape
img = mmcv.imnormalize(img, img_norm_cfg.mean, img_norm_cfg.std, img_norm_cfg.to_rgb)
img = mmcv.impad_to_multiple(img, size_divisor)
pad_shape = img.shape
_img = transforms.ToTensor()(img).float()
_img = _img.unsqueeze(0)
_img_meta = dict(
ori_shape=(img_info['height'], img_info['width'], 3),
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=False)
_img = _img.to('cuda')
return _img, _img_meta,
def draw_image(self,img, meta, result, score_thr=0.9):
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
num_imgs = tensor.size(0)
mean = np.array(mean, dtype=np.float32)
std = np.array(std, dtype=np.float32)
imgs = []
for img_id in range(num_imgs):
img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
img = mmcv.imdenormalize(
img, mean, std, to_bgr=to_rgb).astype(np.uint8)
imgs.append(np.ascontiguousarray(img))
return imgs
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
h, w, _ = meta['ori_shape']
img_show = img[:h, :w, :].copy()
bboxes = np.vstack(bbox_result)
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
assert bboxes.shape[1] == 5
scores = bboxes[:, -1]
inds = scores > score_thr
bboxes = bboxes[inds, :]
labels = labels[inds]
for bbox, label in zip(bboxes, labels):
bbox_int = bbox.astype(np.int32)
left_top = (bbox_int[0], bbox_int[1])
right_bottom = (bbox_int[2], bbox_int[3])
cv2.rectangle(
img_show, left_top, right_bottom, (0, 255, 0), thickness=2)
label_text = self.class_names[
label] if self.class_names is not None else 'cls {}'.format(label)
if len(bbox) > 4:
label_text += '|{:.02f}'.format(bbox[-1])
cv2.putText(img_show, label_text, (bbox_int[0], bbox_int[1] - 2),
cv2.FONT_HERSHEY_COMPLEX, 2, (0, 255, 0))
return img_show
def test():
pass
if __name__ == '__main__':
im = Image.open('/home/kilox/3.jpg')
port = Port()
print(port.process(im,True))
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.